xref: /spdk/test/unit/lib/blob/blob.c/blob_ut.c (revision 4586880f596e61c5a599d0766bb47c004bbd2dd6)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_internal/cunit.h"
10 #include "spdk/blob.h"
11 #include "spdk/string.h"
12 
13 #include "common/lib/ut_multithread.c"
14 #include "../bs_dev_common.c"
15 #include "thread/thread.c"
16 #include "ext_dev.c"
17 #include "blob/blobstore.c"
18 #include "blob/request.c"
19 #include "blob/zeroes.c"
20 #include "blob/blob_bs_dev.c"
21 #include "esnap_dev.c"
22 
23 struct spdk_blob_store *g_bs;
24 spdk_blob_id g_blobid;
25 struct spdk_blob *g_blob, *g_blob2;
26 int g_bserrno, g_bserrno2;
27 struct spdk_xattr_names *g_names;
28 int g_done;
29 char *g_xattr_names[] = {"first", "second", "third"};
30 char *g_xattr_values[] = {"one", "two", "three"};
31 uint64_t g_ctx = 1729;
32 bool g_use_extent_table = false;
33 uint64_t g_copied_clusters_count = 0;
34 
/*
 * On-disk layout of the version 1 blobstore super block, preserved here so
 * the tests can fabricate and load old-format super blocks.  All *_start and
 * *_len fields are expressed in pages unless noted otherwise.  The struct is
 * packed and padded with `reserved` to exactly one 4 KiB page; `crc` occupies
 * the final 4 bytes, as enforced by the static assert below.
 */
struct spdk_bs_super_block_ver1 {
	uint8_t		signature[8];
	uint32_t        version;
	uint32_t        length;
	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id	super_blob;

	uint32_t	cluster_size; /* In bytes */

	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_page_mask_len; /* Count, in pages */

	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_cluster_mask_len; /* Count, in pages */

	uint32_t	md_start; /* Offset from beginning of disk, in pages */
	uint32_t	md_len; /* Count, in pages */

	uint8_t		reserved[4036];
	uint32_t	crc;
} __attribute__((packed));
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
57 
58 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
59 		struct spdk_blob_opts *blob_opts);
60 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
61 static void suite_blob_setup(void);
62 static void suite_blob_cleanup(void);
63 
64 DEFINE_STUB(spdk_memory_domain_memzero, int, (struct spdk_memory_domain *src_domain,
65 		void *src_domain_ctx, struct iovec *iov, uint32_t iovcnt, void (*cpl_cb)(void *, int),
66 		void *cpl_cb_arg), 0);
67 
68 static bool
69 is_esnap_clone(struct spdk_blob *_blob, const void *id, size_t id_len)
70 {
71 	const void *val = NULL;
72 	size_t len = 0;
73 	bool c0, c1, c2, c3;
74 
75 	CU_ASSERT(blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
76 				       true) == 0);
77 	CU_ASSERT((c0 = (len == id_len)));
78 	CU_ASSERT((c1 = (val != NULL && memcmp(val, id, len) == 0)));
79 	CU_ASSERT((c2 = !!(_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT)));
80 	CU_ASSERT((c3 = (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT)));
81 
82 	return c0 && c1 && c2 && c3;
83 }
84 
85 static bool
86 is_not_esnap_clone(struct spdk_blob *_blob)
87 {
88 	const void *val = NULL;
89 	size_t len = 0;
90 	bool c1, c2, c3, c4;
91 
92 	CU_ASSERT((c1 = (blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
93 					      true) == -ENOENT)));
94 	CU_ASSERT((c2 = (val == NULL)));
95 	CU_ASSERT((c3 = ((_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT) == 0)));
96 	CU_ASSERT((c4 = (_blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT)));
97 
98 	return c1 && c2 && c3 && c4;
99 }
100 
/* Convenience wrappers: evaluate the esnap-clone predicate and CU_ASSERT its result. */
#define UT_ASSERT_IS_ESNAP_CLONE(_blob, _id, _len) CU_ASSERT(is_esnap_clone(_blob, _id, _len))
#define UT_ASSERT_IS_NOT_ESNAP_CLONE(_blob) CU_ASSERT(is_not_esnap_clone(_blob))
103 
104 static void
105 _get_xattr_value(void *arg, const char *name,
106 		 const void **value, size_t *value_len)
107 {
108 	uint64_t i;
109 
110 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
111 	SPDK_CU_ASSERT_FATAL(value != NULL);
112 	CU_ASSERT(arg == &g_ctx);
113 
114 	for (i = 0; i < sizeof(g_xattr_names); i++) {
115 		if (!strcmp(name, g_xattr_names[i])) {
116 			*value_len = strlen(g_xattr_values[i]);
117 			*value = g_xattr_values[i];
118 			break;
119 		}
120 	}
121 }
122 
/*
 * xattr get_value callback that deliberately reports an empty value
 * (NULL pointer, zero length) for any name; used to exercise the error
 * path of xattr-based creation.  @arg is expected to be NULL (asserted).
 */
static void
_get_xattr_value_null(void *arg, const char *name,
		      const void **value, size_t *value_len)
{
	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == NULL);

	*value = NULL;
	*value_len = 0;
}
134 
135 static int
136 _get_snapshots_count(struct spdk_blob_store *bs)
137 {
138 	struct spdk_blob_list *snapshot = NULL;
139 	int count = 0;
140 
141 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
142 		count += 1;
143 	}
144 
145 	return count;
146 }
147 
/*
 * Initialize blob opts with defaults, then apply the suite-wide
 * extent-table setting so every test honors g_use_extent_table.
 */
static void
ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	spdk_blob_opts_init(opts, sizeof(*opts));
	opts->use_extent_table = g_use_extent_table;
}
154 
/* Blobstore operation completion: record the status in g_bserrno. */
static void
bs_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}
160 
/* Blobstore init/load completion: stash the new store in g_bs and the status in g_bserrno. */
static void
bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
			   int bserrno)
{
	g_bs = bs;
	g_bserrno = bserrno;
}
168 
169 static void
170 blob_op_complete(void *cb_arg, int bserrno)
171 {
172 	if (cb_arg != NULL) {
173 		int *errp = cb_arg;
174 
175 		*errp = bserrno;
176 	}
177 	g_bserrno = bserrno;
178 }
179 
/* Blob create/identify completion: record the new blob id and status in globals. */
static void
blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	g_blobid = blobid;
	g_bserrno = bserrno;
}
186 
/* Blob open completion: record the blob handle and status in globals. */
static void
blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
{
	g_blob = blb;
	g_bserrno = bserrno;
}
193 
194 static void
195 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
196 {
197 	if (g_blob == NULL) {
198 		g_blob = blob;
199 		g_bserrno = bserrno;
200 	} else {
201 		g_blob2 = blob;
202 		g_bserrno2 = bserrno;
203 	}
204 }
205 
/* Shallow-copy progress callback: record how many clusters have been copied so far. */
static void
blob_shallow_copy_status_cb(uint64_t copied_clusters, void *cb_arg)
{
	g_copied_clusters_count = copied_clusters;
}
211 
/*
 * Cleanly unload *bs, then load the blobstore again from a fresh init_dev()
 * (presumably backed by the same data -- see bs_dev_common.c) and return the
 * new handle through *bs.  g_bserrno is reset to -1 at the end so the
 * caller's next assertion starts from a known state.
 */
static void
ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Unload the blob store */
	spdk_bs_unload(*bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}
232 
/*
 * Simulate an unclean shutdown: free *bs without unloading (so the on-disk
 * `clean` flag is never written), then load the blobstore again, forcing the
 * recovery path.  The new handle is returned through *bs and g_bserrno is
 * reset to -1 afterwards.
 */
static void
ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Dirty shutdown */
	bs_free(*bs);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}
251 
/*
 * Test basic blobstore init/unload: init must reject an unsupported device
 * block length with -EINVAL, then succeed with the default device geometry,
 * and unload cleanly.
 */
static void
blob_init(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;

	dev = init_dev();

	/* should fail for an unsupported blocklen */
	dev->blocklen = 500;
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
278 
/*
 * Test super-blob bookkeeping: querying the super blob before one is set
 * must fail with -ENOENT, and after spdk_bs_set_super() the same query must
 * return the designated blob's id.
 */
static void
blob_super(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid;
	struct spdk_blob_opts blob_opts;

	/* Get the super blob without having set one */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid == g_blobid);
}
311 
/*
 * Test blob open/close reference counting: a second open of an already-open
 * blob returns the same handle, each open needs a matching close, a blob can
 * be reopened after being fully closed, and two opens issued back-to-back
 * (before polling) resolve to the same blob object.
 */
static void
blob_open(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid, blobid2;

	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	blobid2 = spdk_blob_get_id(blob);
	CU_ASSERT(blobid == blobid2);

	/* Try to open file again.  It should return success. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Second open of an open blob yields the same handle (refcount bumped). */
	CU_ASSERT(blob == g_blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Close the file a second time, releasing the second reference.  This
	 *  should succeed.
	 */
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Try to open file again.  It should succeed.  This tests the case
	 *  where the file is opened, closed, then re-opened again.
	 */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open file twice in succession.  This should return the same
	 * blob object.
	 */
	g_blob = NULL;
	g_blob2 = NULL;
	g_bserrno = -1;
	g_bserrno2 = -1;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_bserrno2 == 0);
	CU_ASSERT(g_blob != NULL);
	CU_ASSERT(g_blob2 != NULL);
	CU_ASSERT(g_blob == g_blob2);

	/* Release one of the two references from the double open... */
	g_bserrno = -1;
	spdk_blob_close(g_blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* ...then the remaining reference, and delete the blob. */
	ut_blob_close_and_delete(bs, g_blob);
}
391 
/*
 * Test blob creation paths: explicit cluster counts (10 and 0), default
 * options (opts == NULL), and the -ENOSPC failure when requesting more
 * clusters than the blobstore has.
 */
static void
blob_create(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* Not thin-provisioned, so all 10 clusters are allocated up front. */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with 0 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 0;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with default options (opts == NULL) */

	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* Defaults create an empty blob. */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create blob with size larger than blobstore */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = bs->total_clusters + 1;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
}
475 
/*
 * Test that a zero-cluster blob (created via the public API and via the
 * internal bs_create_blob with NULL internal options) ends up with an
 * extent table present but no extent pages allocated.
 */
static void
blob_create_zero_extent(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	spdk_blob_id blobid;

	/* Create blob with default options (opts == NULL) */
	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	/* Extent table exists but is empty for a zero-length blob. */
	CU_ASSERT(blob->extent_table_found == true);
	CU_ASSERT(blob->active.extent_pages_array_size == 0);
	CU_ASSERT(blob->active.extent_pages == NULL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options  */
	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* No internal xattrs were requested, so none may exist. */
	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(blob->extent_table_found == true);
	CU_ASSERT(blob->active.extent_pages_array_size == 0);
	CU_ASSERT(blob->active.extent_pages == NULL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
526 
527 /*
528  * Create and delete one blob in a loop over and over again.  This helps ensure
529  * that the internal bit masks tracking used clusters and md_pages are being
530  * tracked correctly.
531  */
/*
 * Create and delete a one-cluster blob repeatedly -- enough iterations (4x
 * the larger of the md-page and cluster capacities) to cycle through every
 * bit position, verifying the used_md_pages / used_clusters bookkeeping
 * frees bits correctly on delete.
 */
static void
blob_create_loop(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	uint32_t i, loop_count;

	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
				  spdk_bit_pool_capacity(bs->used_clusters));

	for (i = 0; i < loop_count; i++) {
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 1;
		g_bserrno = -1;
		g_blobid = SPDK_BLOBID_INVALID;
		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
}
556 
557 static void
558 blob_create_fail(void)
559 {
560 	struct spdk_blob_store *bs = g_bs;
561 	struct spdk_blob_opts opts;
562 	spdk_blob_id blobid;
563 	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
564 	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
565 
566 	/* NULL callback */
567 	ut_spdk_blob_opts_init(&opts);
568 	opts.xattrs.names = g_xattr_names;
569 	opts.xattrs.get_value = NULL;
570 	opts.xattrs.count = 1;
571 	opts.xattrs.ctx = &g_ctx;
572 
573 	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
574 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
575 	poll_threads();
576 	CU_ASSERT(g_bserrno == -EINVAL);
577 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
578 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
579 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
580 
581 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
582 	poll_threads();
583 	CU_ASSERT(g_bserrno == -ENOENT);
584 	SPDK_CU_ASSERT_FATAL(g_blob == NULL);
585 
586 	ut_bs_reload(&bs, NULL);
587 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
588 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
589 
590 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
591 	poll_threads();
592 	CU_ASSERT(g_blob == NULL);
593 	CU_ASSERT(g_bserrno == -ENOENT);
594 }
595 
/*
 * Test internal-xattr creation via bs_create_blob: internal xattrs must be
 * readable through the internal getter (blob_get_xattr_value with
 * internal=true) but invisible to the public spdk_blob_get_xattr_value.
 * Also covers creation with NULL internal options.
 */
static void
blob_create_internal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	const void *value;
	size_t value_len;
	spdk_blob_id blobid;
	int rc;

	/* Create blob with custom xattrs */

	ut_spdk_blob_opts_init(&opts);
	blob_xattrs_init(&internal_xattrs);
	internal_xattrs.count = 3;
	internal_xattrs.names = g_xattr_names;
	internal_xattrs.get_value = _get_xattr_value;
	internal_xattrs.ctx = &g_ctx;

	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* All three internal xattrs are readable via the internal getter. */
	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* The public getter must not expose internal xattrs. */
	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options  */

	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);

	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
681 
682 static void
683 blob_thin_provision(void)
684 {
685 	struct spdk_blob_store *bs;
686 	struct spdk_bs_dev *dev;
687 	struct spdk_blob *blob;
688 	struct spdk_blob_opts opts;
689 	struct spdk_bs_opts bs_opts;
690 	spdk_blob_id blobid;
691 
692 	dev = init_dev();
693 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
694 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
695 
696 	/* Initialize a new blob store */
697 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
698 	poll_threads();
699 	CU_ASSERT(g_bserrno == 0);
700 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
701 
702 	bs = g_bs;
703 
704 	/* Create blob with thin provisioning enabled */
705 
706 	ut_spdk_blob_opts_init(&opts);
707 	opts.thin_provision = true;
708 	opts.num_clusters = 10;
709 
710 	blob = ut_blob_create_and_open(bs, &opts);
711 	blobid = spdk_blob_get_id(blob);
712 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
713 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
714 	/* In thin provisioning with num_clusters is set, if not using the
715 	 * extent table, there is no allocation. If extent table is used,
716 	 * there is related allocation happened. */
717 	if (blob->extent_table_found == true) {
718 		CU_ASSERT(blob->active.extent_pages_array_size > 0);
719 		CU_ASSERT(blob->active.extent_pages != NULL);
720 	} else {
721 		CU_ASSERT(blob->active.extent_pages_array_size == 0);
722 		CU_ASSERT(blob->active.extent_pages == NULL);
723 	}
724 
725 	spdk_blob_close(blob, blob_op_complete, NULL);
726 	CU_ASSERT(g_bserrno == 0);
727 
728 	/* Do not shut down cleanly.  This makes sure that when we load again
729 	 *  and try to recover a valid used_cluster map, that blobstore will
730 	 *  ignore clusters with index 0 since these are unallocated clusters.
731 	 */
732 	ut_bs_dirty_load(&bs, &bs_opts);
733 
734 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
735 	poll_threads();
736 	CU_ASSERT(g_bserrno == 0);
737 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
738 	blob = g_blob;
739 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
740 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
741 
742 	ut_blob_close_and_delete(bs, blob);
743 
744 	spdk_bs_unload(bs, bs_op_complete, NULL);
745 	poll_threads();
746 	CU_ASSERT(g_bserrno == 0);
747 	g_bs = NULL;
748 }
749 
750 static void
751 blob_snapshot(void)
752 {
753 	struct spdk_blob_store *bs = g_bs;
754 	struct spdk_blob *blob;
755 	struct spdk_blob *snapshot, *snapshot2;
756 	struct spdk_blob_bs_dev *blob_bs_dev;
757 	struct spdk_blob_opts opts;
758 	struct spdk_blob_xattr_opts xattrs;
759 	spdk_blob_id blobid;
760 	spdk_blob_id snapshotid;
761 	spdk_blob_id snapshotid2;
762 	const void *value;
763 	size_t value_len;
764 	int rc;
765 	spdk_blob_id ids[2];
766 	size_t count;
767 
768 	/* Create blob with 10 clusters */
769 	ut_spdk_blob_opts_init(&opts);
770 	opts.num_clusters = 10;
771 
772 	blob = ut_blob_create_and_open(bs, &opts);
773 	blobid = spdk_blob_get_id(blob);
774 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
775 
776 	/* Create snapshot from blob */
777 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
778 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
779 	poll_threads();
780 	CU_ASSERT(g_bserrno == 0);
781 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
782 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
783 	snapshotid = g_blobid;
784 
785 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
786 	poll_threads();
787 	CU_ASSERT(g_bserrno == 0);
788 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
789 	snapshot = g_blob;
790 	CU_ASSERT(snapshot->data_ro == true);
791 	CU_ASSERT(snapshot->md_ro == true);
792 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
793 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);
794 
795 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
796 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
797 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
798 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
799 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
800 
801 	/* Try to create snapshot from clone with xattrs */
802 	xattrs.names = g_xattr_names;
803 	xattrs.get_value = _get_xattr_value;
804 	xattrs.count = 3;
805 	xattrs.ctx = &g_ctx;
806 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
807 	poll_threads();
808 	CU_ASSERT(g_bserrno == 0);
809 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
810 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
811 	snapshotid2 = g_blobid;
812 
813 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
814 	CU_ASSERT(g_bserrno == 0);
815 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
816 	snapshot2 = g_blob;
817 	CU_ASSERT(snapshot2->data_ro == true);
818 	CU_ASSERT(snapshot2->md_ro == true);
819 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
820 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 0);
821 
822 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
823 	CU_ASSERT(snapshot->back_bs_dev == NULL);
824 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
825 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
826 
827 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
828 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
829 
830 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
831 	CU_ASSERT(blob_bs_dev->blob == snapshot);
832 
833 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
834 	CU_ASSERT(rc == 0);
835 	SPDK_CU_ASSERT_FATAL(value != NULL);
836 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
837 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
838 
839 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
840 	CU_ASSERT(rc == 0);
841 	SPDK_CU_ASSERT_FATAL(value != NULL);
842 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
843 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
844 
845 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
846 	CU_ASSERT(rc == 0);
847 	SPDK_CU_ASSERT_FATAL(value != NULL);
848 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
849 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
850 
851 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
852 	count = 2;
853 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
854 	CU_ASSERT(count == 1);
855 	CU_ASSERT(ids[0] == blobid);
856 
857 	count = 2;
858 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
859 	CU_ASSERT(count == 1);
860 	CU_ASSERT(ids[0] == snapshotid2);
861 
862 	/* Try to create snapshot from snapshot */
863 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
864 	poll_threads();
865 	CU_ASSERT(g_bserrno == -EINVAL);
866 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
867 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
868 
869 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
870 	ut_blob_close_and_delete(bs, blob);
871 	count = 2;
872 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
873 	CU_ASSERT(count == 0);
874 
875 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
876 	ut_blob_close_and_delete(bs, snapshot2);
877 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
878 	count = 2;
879 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
880 	CU_ASSERT(count == 0);
881 
882 	ut_blob_close_and_delete(bs, snapshot);
883 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
884 }
885 
/*
 * Test that blob I/O is frozen during snapshot creation: a write issued
 * while the snapshot is mid-flight is queued (not executed), and completes
 * correctly once the snapshot finishes and I/O is thawed.
 */
static void
blob_snapshot_freeze_io(void)
{
	struct spdk_io_channel *channel;
	struct spdk_bs_channel *bs_channel;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t num_of_pages = 10;
	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];

	memset(payload_write, 0xE5, sizeof(payload_write));
	memset(payload_read, 0x00, sizeof(payload_read));
	memset(payload_zero, 0x00, sizeof(payload_zero));

	/* Test freeze I/O during snapshot */
	channel = spdk_bs_alloc_io_channel(bs);
	bs_channel = spdk_io_channel_get_ctx(channel);

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);

	/* This is implementation specific.
	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
	 * Four async I/O operations happen before that. */
	poll_thread_times(0, 5);

	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));

	/* Blob I/O should be frozen here */
	CU_ASSERT(blob->frozen_refcnt == 1);

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);

	/* Verify that I/O is queued */
	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
	/* Verify that payload is not written to disk, at this point the blobs already switched */
	CU_ASSERT(blob->active.clusters[0] == 0);

	/* Finish all operations including spdk_bs_create_snapshot */
	poll_threads();

	/* Verify snapshot */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Verify that blob has unset frozen_io */
	CU_ASSERT(blob->frozen_refcnt == 0);

	/* Verify that postponed I/O completed successfully by comparing payload */
	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
958 
959 static void
960 blob_clone(void)
961 {
962 	struct spdk_blob_store *bs = g_bs;
963 	struct spdk_blob_opts opts;
964 	struct spdk_blob *blob, *snapshot, *clone;
965 	spdk_blob_id blobid, cloneid, snapshotid;
966 	struct spdk_blob_xattr_opts xattrs;
967 	const void *value;
968 	size_t value_len;
969 	int rc;
970 
971 	/* Create blob with 10 clusters */
972 
973 	ut_spdk_blob_opts_init(&opts);
974 	opts.num_clusters = 10;
975 
976 	blob = ut_blob_create_and_open(bs, &opts);
977 	blobid = spdk_blob_get_id(blob);
978 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
979 
980 	/* Create snapshot */
981 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
982 	poll_threads();
983 	CU_ASSERT(g_bserrno == 0);
984 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
985 	snapshotid = g_blobid;
986 
987 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
988 	poll_threads();
989 	CU_ASSERT(g_bserrno == 0);
990 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
991 	snapshot = g_blob;
992 	CU_ASSERT(snapshot->data_ro == true);
993 	CU_ASSERT(snapshot->md_ro == true);
994 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
995 
996 	spdk_blob_close(snapshot, blob_op_complete, NULL);
997 	poll_threads();
998 	CU_ASSERT(g_bserrno == 0);
999 
1000 	/* Create clone from snapshot with xattrs */
1001 	xattrs.names = g_xattr_names;
1002 	xattrs.get_value = _get_xattr_value;
1003 	xattrs.count = 3;
1004 	xattrs.ctx = &g_ctx;
1005 
1006 	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
1007 	poll_threads();
1008 	CU_ASSERT(g_bserrno == 0);
1009 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1010 	cloneid = g_blobid;
1011 
1012 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
1013 	poll_threads();
1014 	CU_ASSERT(g_bserrno == 0);
1015 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1016 	clone = g_blob;
1017 	CU_ASSERT(clone->data_ro == false);
1018 	CU_ASSERT(clone->md_ro == false);
1019 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
1020 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(clone) == 0);
1021 
1022 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
1023 	CU_ASSERT(rc == 0);
1024 	SPDK_CU_ASSERT_FATAL(value != NULL);
1025 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
1026 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
1027 
1028 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
1029 	CU_ASSERT(rc == 0);
1030 	SPDK_CU_ASSERT_FATAL(value != NULL);
1031 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
1032 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
1033 
1034 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
1035 	CU_ASSERT(rc == 0);
1036 	SPDK_CU_ASSERT_FATAL(value != NULL);
1037 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
1038 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
1039 
1040 
1041 	spdk_blob_close(clone, blob_op_complete, NULL);
1042 	poll_threads();
1043 	CU_ASSERT(g_bserrno == 0);
1044 
1045 	/* Try to create clone from not read only blob */
1046 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1047 	poll_threads();
1048 	CU_ASSERT(g_bserrno == -EINVAL);
1049 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
1050 
1051 	/* Mark blob as read only */
1052 	spdk_blob_set_read_only(blob);
1053 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1054 	poll_threads();
1055 	CU_ASSERT(g_bserrno == 0);
1056 
1057 	/* Create clone from read only blob */
1058 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1059 	poll_threads();
1060 	CU_ASSERT(g_bserrno == 0);
1061 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1062 	cloneid = g_blobid;
1063 
1064 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
1065 	poll_threads();
1066 	CU_ASSERT(g_bserrno == 0);
1067 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1068 	clone = g_blob;
1069 	CU_ASSERT(clone->data_ro == false);
1070 	CU_ASSERT(clone->md_ro == false);
1071 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
1072 
1073 	ut_blob_close_and_delete(bs, clone);
1074 	ut_blob_close_and_delete(bs, blob);
1075 }
1076 
/*
 * Common body for blob_inflate(): exercises either spdk_bs_inflate_blob()
 * (decouple_parent == false) or spdk_bs_blob_decouple_parent()
 * (decouple_parent == true) on a thin provisioned blob — first while the
 * blob has no parent, then after a snapshot has become its parent.
 */
static void
_blob_inflate(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	uint64_t free_clusters;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* 1) Blob with no parent */
	if (decouple_parent) {
		/* Decouple parent of blob with no parent (should fail) */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno != 0);
	} else {
		/* Inflate of thin blob with no parent should made it thick */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
	}

	/* Snapshotting turns the blob into a thin clone of the snapshot,
	 * so in both branches above it now has a parent. */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* 2) Blob with parent */
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* when only parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	}

	/* Now, it should be possible to delete snapshot - the blob no longer
	 * depends on it after either inflate or decouple. */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	/* Full inflate makes the blob thick; decouple leaves it thin. */
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
1173 
static void
blob_inflate(void)
{
	int i;

	/* Run the shared scenario in both modes: full inflate first
	 * (decouple_parent == false), then decouple-parent only (true). */
	for (i = 0; i < 2; i++) {
		_blob_inflate(i == 1);
	}
}
1180 
1181 static void
1182 blob_delete(void)
1183 {
1184 	struct spdk_blob_store *bs = g_bs;
1185 	struct spdk_blob_opts blob_opts;
1186 	spdk_blob_id blobid;
1187 
1188 	/* Create a blob and then delete it. */
1189 	ut_spdk_blob_opts_init(&blob_opts);
1190 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1191 	poll_threads();
1192 	CU_ASSERT(g_bserrno == 0);
1193 	CU_ASSERT(g_blobid > 0);
1194 	blobid = g_blobid;
1195 
1196 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
1197 	poll_threads();
1198 	CU_ASSERT(g_bserrno == 0);
1199 
1200 	/* Try to open the blob */
1201 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1202 	poll_threads();
1203 	CU_ASSERT(g_bserrno == -ENOENT);
1204 }
1205 
1206 static void
1207 blob_resize_test(void)
1208 {
1209 	struct spdk_blob_store *bs = g_bs;
1210 	struct spdk_blob *blob;
1211 	uint64_t free_clusters;
1212 
1213 	free_clusters = spdk_bs_free_cluster_count(bs);
1214 
1215 	blob = ut_blob_create_and_open(bs, NULL);
1216 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
1217 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1218 
1219 	/* Confirm that resize fails if blob is marked read-only. */
1220 	blob->md_ro = true;
1221 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1222 	poll_threads();
1223 	CU_ASSERT(g_bserrno == -EPERM);
1224 	blob->md_ro = false;
1225 
1226 	/* The blob started at 0 clusters. Resize it to be 5. */
1227 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1228 	poll_threads();
1229 	CU_ASSERT(g_bserrno == 0);
1230 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1231 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 5);
1232 
1233 	/* Shrink the blob to 3 clusters. This will not actually release
1234 	 * the old clusters until the blob is synced.
1235 	 */
1236 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
1237 	poll_threads();
1238 	CU_ASSERT(g_bserrno == 0);
1239 	/* Verify there are still 5 clusters in use */
1240 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1241 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 3);
1242 
1243 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1244 	poll_threads();
1245 	CU_ASSERT(g_bserrno == 0);
1246 	/* Now there are only 3 clusters in use */
1247 	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
1248 
1249 	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
1250 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1251 	poll_threads();
1252 	CU_ASSERT(g_bserrno == 0);
1253 	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
1254 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
1255 
1256 	/* Try to resize the blob to size larger than blobstore. */
1257 	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
1258 	poll_threads();
1259 	CU_ASSERT(g_bserrno == -ENOSPC);
1260 
1261 	ut_blob_close_and_delete(bs, blob);
1262 }
1263 
1264 static void
1265 blob_resize_thin_test(void)
1266 {
1267 	struct spdk_blob_store *bs = g_bs;
1268 	struct spdk_blob *blob;
1269 	struct spdk_blob_opts opts;
1270 	struct spdk_io_channel *blob_ch;
1271 	uint64_t free_clusters;
1272 	uint64_t io_units_per_cluster;
1273 	uint64_t offset;
1274 	uint8_t buf1[DEV_BUFFER_BLOCKLEN];
1275 
1276 	free_clusters = spdk_bs_free_cluster_count(bs);
1277 
1278 	blob_ch = spdk_bs_alloc_io_channel(bs);
1279 	SPDK_CU_ASSERT_FATAL(blob_ch != NULL);
1280 
1281 	/* Create blob with thin provisioning enabled */
1282 	ut_spdk_blob_opts_init(&opts);
1283 	opts.thin_provision = true;
1284 	opts.num_clusters = 0;
1285 
1286 	blob = ut_blob_create_and_open(bs, &opts);
1287 	CU_ASSERT((free_clusters) == spdk_bs_free_cluster_count(bs));
1288 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1289 	io_units_per_cluster = bs_io_units_per_cluster(blob);
1290 
1291 	/* The blob started at 0 clusters. Resize it to be 6. */
1292 	spdk_blob_resize(blob, 6, blob_op_complete, NULL);
1293 	poll_threads();
1294 	CU_ASSERT(g_bserrno == 0);
1295 	CU_ASSERT((free_clusters) == spdk_bs_free_cluster_count(bs));
1296 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1297 
1298 	/* Write on cluster 0,2,4 and 5 of blob */
1299 	for (offset = 0; offset < io_units_per_cluster; offset++) {
1300 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1301 		poll_threads();
1302 		CU_ASSERT(g_bserrno == 0);
1303 	}
1304 	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
1305 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1306 		poll_threads();
1307 		CU_ASSERT(g_bserrno == 0);
1308 	}
1309 	for (offset = 4 * io_units_per_cluster; offset < 5 * io_units_per_cluster; offset++) {
1310 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1311 		poll_threads();
1312 		CU_ASSERT(g_bserrno == 0);
1313 	}
1314 	for (offset = 5 * io_units_per_cluster; offset < 6 * io_units_per_cluster; offset++) {
1315 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1316 		poll_threads();
1317 		CU_ASSERT(g_bserrno == 0);
1318 	}
1319 
1320 	/* Check allocated clusters after write */
1321 	CU_ASSERT((free_clusters - 4) == spdk_bs_free_cluster_count(bs));
1322 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 4);
1323 
1324 	/* Shrink the blob to 2 clusters. This will not actually release
1325 	 * the old clusters until the blob is synced.
1326 	 */
1327 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1328 	poll_threads();
1329 	CU_ASSERT(g_bserrno == 0);
1330 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 2);
1331 	CU_ASSERT((free_clusters - 4) == spdk_bs_free_cluster_count(bs));
1332 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
1333 
1334 	/* Sync blob: 4 clusters were truncated but only 3 of them was allocated */
1335 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1336 	poll_threads();
1337 	CU_ASSERT(g_bserrno == 0);
1338 	CU_ASSERT((free_clusters - 1) == spdk_bs_free_cluster_count(bs));
1339 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 2);
1340 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
1341 
1342 	spdk_bs_free_io_channel(blob_ch);
1343 	ut_blob_close_and_delete(bs, blob);
1344 }
1345 
/*
 * Verify that spdk_blob_set_read_only() takes effect only after the next
 * metadata sync, and that the read-only state persists across a blob
 * re-open and a full blobstore reload.
 */
static void
blob_read_only(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_bs_opts opts;
	spdk_blob_id blobid;
	int rc;

	/* This test uses its own blobstore (not g_bs) so it can reload it. */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	rc = spdk_blob_set_read_only(blob);
	CU_ASSERT(rc == 0);

	/* The flags are not applied until the metadata is synced. */
	CU_ASSERT(blob->data_ro == false);
	CU_ASSERT(blob->md_ro == false);

	spdk_blob_sync_md(blob, bs_op_complete, NULL);
	poll_threads();

	/* After the sync both data and metadata are read-only and the
	 * persistent READ_ONLY flag is set. */
	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Re-open: read-only state must survive close/open. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload the whole blobstore: state must survive on-disk round trip. */
	ut_bs_reload(&bs, &opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
1418 
1419 static void
1420 channel_ops(void)
1421 {
1422 	struct spdk_blob_store *bs = g_bs;
1423 	struct spdk_io_channel *channel;
1424 
1425 	channel = spdk_bs_alloc_io_channel(bs);
1426 	CU_ASSERT(channel != NULL);
1427 
1428 	spdk_bs_free_io_channel(channel);
1429 	poll_threads();
1430 }
1431 
1432 static void
1433 blob_write(void)
1434 {
1435 	struct spdk_blob_store *bs = g_bs;
1436 	struct spdk_blob *blob = g_blob;
1437 	struct spdk_io_channel *channel;
1438 	uint64_t pages_per_cluster;
1439 	uint8_t payload[10 * 4096];
1440 
1441 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1442 
1443 	channel = spdk_bs_alloc_io_channel(bs);
1444 	CU_ASSERT(channel != NULL);
1445 
1446 	/* Write to a blob with 0 size */
1447 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1448 	poll_threads();
1449 	CU_ASSERT(g_bserrno == -EINVAL);
1450 
1451 	/* Resize the blob */
1452 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1453 	poll_threads();
1454 	CU_ASSERT(g_bserrno == 0);
1455 
1456 	/* Confirm that write fails if blob is marked read-only. */
1457 	blob->data_ro = true;
1458 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1459 	poll_threads();
1460 	CU_ASSERT(g_bserrno == -EPERM);
1461 	blob->data_ro = false;
1462 
1463 	/* Write to the blob */
1464 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1465 	poll_threads();
1466 	CU_ASSERT(g_bserrno == 0);
1467 
1468 	/* Write starting beyond the end */
1469 	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1470 			   NULL);
1471 	poll_threads();
1472 	CU_ASSERT(g_bserrno == -EINVAL);
1473 
1474 	/* Write starting at a valid location but going off the end */
1475 	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1476 			   blob_op_complete, NULL);
1477 	poll_threads();
1478 	CU_ASSERT(g_bserrno == -EINVAL);
1479 
1480 	spdk_bs_free_io_channel(channel);
1481 	poll_threads();
1482 }
1483 
1484 static void
1485 blob_read(void)
1486 {
1487 	struct spdk_blob_store *bs = g_bs;
1488 	struct spdk_blob *blob = g_blob;
1489 	struct spdk_io_channel *channel;
1490 	uint64_t pages_per_cluster;
1491 	uint8_t payload[10 * 4096];
1492 
1493 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1494 
1495 	channel = spdk_bs_alloc_io_channel(bs);
1496 	CU_ASSERT(channel != NULL);
1497 
1498 	/* Read from a blob with 0 size */
1499 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1500 	poll_threads();
1501 	CU_ASSERT(g_bserrno == -EINVAL);
1502 
1503 	/* Resize the blob */
1504 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1505 	poll_threads();
1506 	CU_ASSERT(g_bserrno == 0);
1507 
1508 	/* Confirm that read passes if blob is marked read-only. */
1509 	blob->data_ro = true;
1510 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1511 	poll_threads();
1512 	CU_ASSERT(g_bserrno == 0);
1513 	blob->data_ro = false;
1514 
1515 	/* Read from the blob */
1516 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1517 	poll_threads();
1518 	CU_ASSERT(g_bserrno == 0);
1519 
1520 	/* Read starting beyond the end */
1521 	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1522 			  NULL);
1523 	poll_threads();
1524 	CU_ASSERT(g_bserrno == -EINVAL);
1525 
1526 	/* Read starting at a valid location but going off the end */
1527 	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1528 			  blob_op_complete, NULL);
1529 	poll_threads();
1530 	CU_ASSERT(g_bserrno == -EINVAL);
1531 
1532 	spdk_bs_free_io_channel(channel);
1533 	poll_threads();
1534 }
1535 
1536 static void
1537 blob_rw_verify(void)
1538 {
1539 	struct spdk_blob_store *bs = g_bs;
1540 	struct spdk_blob *blob = g_blob;
1541 	struct spdk_io_channel *channel;
1542 	uint8_t payload_read[10 * 4096];
1543 	uint8_t payload_write[10 * 4096];
1544 
1545 	channel = spdk_bs_alloc_io_channel(bs);
1546 	CU_ASSERT(channel != NULL);
1547 
1548 	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
1549 	poll_threads();
1550 	CU_ASSERT(g_bserrno == 0);
1551 
1552 	memset(payload_write, 0xE5, sizeof(payload_write));
1553 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
1554 	poll_threads();
1555 	CU_ASSERT(g_bserrno == 0);
1556 
1557 	memset(payload_read, 0x00, sizeof(payload_read));
1558 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
1559 	poll_threads();
1560 	CU_ASSERT(g_bserrno == 0);
1561 	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);
1562 
1563 	spdk_bs_free_io_channel(channel);
1564 	poll_threads();
1565 }
1566 
/*
 * Verify vectored read/write (spdk_blob_io_writev/readv) across a cluster
 * boundary, using different iov layouts for the write and the read so the
 * scatter/gather handling is exercised on both paths.
 */
static void
blob_rw_verify_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	struct iovec iov_read[3];
	struct iovec iov_write[3];
	void *buf;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Manually adjust the offset of the blob's second cluster.  This allows
	 *  us to make sure that the readv/write code correctly accounts for I/O
	 *  that cross cluster boundaries.  Start by asserting that the allocated
	 *  clusters are where we expect before modifying the second cluster.
	 */
	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
	/* Point cluster 1 at disk cluster 3, leaving disk cluster 2 untouched
	 * (verified against zeroes at the end of the test). */
	blob->active.clusters[1] = 3 * 256;

	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	/*
	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
	 *  will get written to the first cluster, the last 4 to the second cluster.
	 */
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read back through a differently-shaped iov (3+4+3 pages). */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	buf = calloc(1, 256 * 4096);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	/* Check that cluster 2 on "disk" was not modified. */
	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
	free(buf);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1638 
1639 static uint32_t
1640 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1641 {
1642 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1643 	struct spdk_bs_request_set *set;
1644 	uint32_t count = 0;
1645 
1646 	TAILQ_FOREACH(set, &channel->reqs, link) {
1647 		count++;
1648 	}
1649 
1650 	return count;
1651 }
1652 
1653 static void
1654 blob_rw_verify_iov_nomem(void)
1655 {
1656 	struct spdk_blob_store *bs = g_bs;
1657 	struct spdk_blob *blob = g_blob;
1658 	struct spdk_io_channel *channel;
1659 	uint8_t payload_write[10 * 4096];
1660 	struct iovec iov_write[3];
1661 	uint32_t req_count;
1662 
1663 	channel = spdk_bs_alloc_io_channel(bs);
1664 	CU_ASSERT(channel != NULL);
1665 
1666 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1667 	poll_threads();
1668 	CU_ASSERT(g_bserrno == 0);
1669 
1670 	/*
1671 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1672 	 *  will get written to the first cluster, the last 4 to the second cluster.
1673 	 */
1674 	iov_write[0].iov_base = payload_write;
1675 	iov_write[0].iov_len = 1 * 4096;
1676 	iov_write[1].iov_base = payload_write + 1 * 4096;
1677 	iov_write[1].iov_len = 5 * 4096;
1678 	iov_write[2].iov_base = payload_write + 6 * 4096;
1679 	iov_write[2].iov_len = 4 * 4096;
1680 	MOCK_SET(calloc, NULL);
1681 	req_count = bs_channel_get_req_count(channel);
1682 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1683 	poll_threads();
1684 	CU_ASSERT(g_bserrno = -ENOMEM);
1685 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1686 	MOCK_CLEAR(calloc);
1687 
1688 	spdk_bs_free_io_channel(channel);
1689 	poll_threads();
1690 }
1691 
1692 static void
1693 blob_rw_iov_read_only(void)
1694 {
1695 	struct spdk_blob_store *bs = g_bs;
1696 	struct spdk_blob *blob = g_blob;
1697 	struct spdk_io_channel *channel;
1698 	uint8_t payload_read[4096];
1699 	uint8_t payload_write[4096];
1700 	struct iovec iov_read;
1701 	struct iovec iov_write;
1702 
1703 	channel = spdk_bs_alloc_io_channel(bs);
1704 	CU_ASSERT(channel != NULL);
1705 
1706 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1707 	poll_threads();
1708 	CU_ASSERT(g_bserrno == 0);
1709 
1710 	/* Verify that writev failed if read_only flag is set. */
1711 	blob->data_ro = true;
1712 	iov_write.iov_base = payload_write;
1713 	iov_write.iov_len = sizeof(payload_write);
1714 	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
1715 	poll_threads();
1716 	CU_ASSERT(g_bserrno == -EPERM);
1717 
1718 	/* Verify that reads pass if data_ro flag is set. */
1719 	iov_read.iov_base = payload_read;
1720 	iov_read.iov_len = sizeof(payload_read);
1721 	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
1722 	poll_threads();
1723 	CU_ASSERT(g_bserrno == 0);
1724 
1725 	spdk_bs_free_io_channel(channel);
1726 	poll_threads();
1727 }
1728 
1729 static void
1730 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1731 		       uint8_t *payload, uint64_t offset, uint64_t length,
1732 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1733 {
1734 	uint64_t i;
1735 	uint8_t *buf;
1736 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1737 
1738 	/* To be sure that operation is NOT split, read one page at the time */
1739 	buf = payload;
1740 	for (i = 0; i < length; i++) {
1741 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1742 		poll_threads();
1743 		if (g_bserrno != 0) {
1744 			/* Pass the error code up */
1745 			break;
1746 		}
1747 		buf += page_size;
1748 	}
1749 
1750 	cb_fn(cb_arg, g_bserrno);
1751 }
1752 
1753 static void
1754 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1755 			uint8_t *payload, uint64_t offset, uint64_t length,
1756 			spdk_blob_op_complete cb_fn, void *cb_arg)
1757 {
1758 	uint64_t i;
1759 	uint8_t *buf;
1760 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1761 
1762 	/* To be sure that operation is NOT split, write one page at the time */
1763 	buf = payload;
1764 	for (i = 0; i < length; i++) {
1765 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1766 		poll_threads();
1767 		if (g_bserrno != 0) {
1768 			/* Pass the error code up */
1769 			break;
1770 		}
1771 		buf += page_size;
1772 	}
1773 
1774 	cb_fn(cb_arg, g_bserrno);
1775 }
1776 
/*
 * Verify that large contiguous-buffer I/O that must be split into
 * per-cluster sub-requests produces the same data as unsplit single-page
 * I/O: write split / read unsplit, then write unsplit / read split.
 */
static void
blob_operation_split_rw(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	uint64_t i;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write: each page starts with its
	 * 1-based page number so misplaced pages are detectable. */
	memset(payload_pattern, 0xFF, payload_size);
	for (i = 0; i < pages_per_payload; i++) {
		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob except last page */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write last page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency: the last page holds the
	 * first page of the pattern, everything before it the full pattern. */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* Fill whole blob except first page */
	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write first page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test: write unsplit (page at a time), read in one split op. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	/* NOTE(review): second poll_threads() appears to drain the split
	 * sub-requests' completions — confirm whether one call suffices. */
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test: write in one split op, read back unsplit. */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	g_blob = NULL;
	g_blobid = 0;

	free(payload_read);
	free(payload_write);
	free(payload_pattern);

	ut_blob_close_and_delete(bs, blob);
}
1916 
1917 static void
1918 blob_operation_split_rw_iov(void)
1919 {
1920 	struct spdk_blob_store *bs = g_bs;
1921 	struct spdk_blob *blob;
1922 	struct spdk_io_channel *channel;
1923 	struct spdk_blob_opts opts;
1924 	uint64_t cluster_size;
1925 
1926 	uint64_t payload_size;
1927 	uint8_t *payload_read;
1928 	uint8_t *payload_write;
1929 	uint8_t *payload_pattern;
1930 
1931 	uint64_t page_size;
1932 	uint64_t pages_per_cluster;
1933 	uint64_t pages_per_payload;
1934 
1935 	struct iovec iov_read[2];
1936 	struct iovec iov_write[2];
1937 
1938 	uint64_t i, j;
1939 
1940 	cluster_size = spdk_bs_get_cluster_size(bs);
1941 	page_size = spdk_bs_get_page_size(bs);
1942 	pages_per_cluster = cluster_size / page_size;
1943 	pages_per_payload = pages_per_cluster * 5;
1944 	payload_size = cluster_size * 5;
1945 
1946 	payload_read = malloc(payload_size);
1947 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1948 
1949 	payload_write = malloc(payload_size);
1950 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1951 
1952 	payload_pattern = malloc(payload_size);
1953 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1954 
1955 	/* Prepare random pattern to write */
1956 	for (i = 0; i < pages_per_payload; i++) {
1957 		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
1958 			uint64_t *tmp;
1959 
1960 			tmp = (uint64_t *)payload_pattern;
1961 			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
1962 			*tmp = i + 1;
1963 		}
1964 	}
1965 
1966 	channel = spdk_bs_alloc_io_channel(bs);
1967 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1968 
1969 	/* Create blob */
1970 	ut_spdk_blob_opts_init(&opts);
1971 	opts.thin_provision = false;
1972 	opts.num_clusters = 5;
1973 
1974 	blob = ut_blob_create_and_open(bs, &opts);
1975 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1976 
1977 	/* Initial read should return zeroes payload */
1978 	memset(payload_read, 0xFF, payload_size);
1979 	iov_read[0].iov_base = payload_read;
1980 	iov_read[0].iov_len = cluster_size * 3;
1981 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1982 	iov_read[1].iov_len = cluster_size * 2;
1983 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1984 	poll_threads();
1985 	CU_ASSERT(g_bserrno == 0);
1986 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1987 
1988 	/* First of iovs fills whole blob except last page and second of iovs writes last page
1989 	 *  with a pattern. */
1990 	iov_write[0].iov_base = payload_pattern;
1991 	iov_write[0].iov_len = payload_size - page_size;
1992 	iov_write[1].iov_base = payload_pattern;
1993 	iov_write[1].iov_len = page_size;
1994 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1995 	poll_threads();
1996 	CU_ASSERT(g_bserrno == 0);
1997 
1998 	/* Read whole blob and check consistency */
1999 	memset(payload_read, 0xFF, payload_size);
2000 	iov_read[0].iov_base = payload_read;
2001 	iov_read[0].iov_len = cluster_size * 2;
2002 	iov_read[1].iov_base = payload_read + cluster_size * 2;
2003 	iov_read[1].iov_len = cluster_size * 3;
2004 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
2005 	poll_threads();
2006 	CU_ASSERT(g_bserrno == 0);
2007 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
2008 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
2009 
2010 	/* First of iovs fills only first page and second of iovs writes whole blob except
2011 	 *  first page with a pattern. */
2012 	iov_write[0].iov_base = payload_pattern;
2013 	iov_write[0].iov_len = page_size;
2014 	iov_write[1].iov_base = payload_pattern;
2015 	iov_write[1].iov_len = payload_size - page_size;
2016 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
2017 	poll_threads();
2018 	CU_ASSERT(g_bserrno == 0);
2019 
2020 	/* Read whole blob and check consistency */
2021 	memset(payload_read, 0xFF, payload_size);
2022 	iov_read[0].iov_base = payload_read;
2023 	iov_read[0].iov_len = cluster_size * 4;
2024 	iov_read[1].iov_base = payload_read + cluster_size * 4;
2025 	iov_read[1].iov_len = cluster_size;
2026 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
2027 	poll_threads();
2028 	CU_ASSERT(g_bserrno == 0);
2029 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
2030 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
2031 
2032 
2033 	/* Fill whole blob with a pattern (5 clusters) */
2034 
2035 	/* 1. Read test. */
2036 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
2037 				blob_op_complete, NULL);
2038 	poll_threads();
2039 	CU_ASSERT(g_bserrno == 0);
2040 
2041 	memset(payload_read, 0xFF, payload_size);
2042 	iov_read[0].iov_base = payload_read;
2043 	iov_read[0].iov_len = cluster_size;
2044 	iov_read[1].iov_base = payload_read + cluster_size;
2045 	iov_read[1].iov_len = cluster_size * 4;
2046 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
2047 	poll_threads();
2048 	CU_ASSERT(g_bserrno == 0);
2049 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
2050 
2051 	/* 2. Write test. */
2052 	iov_write[0].iov_base = payload_read;
2053 	iov_write[0].iov_len = cluster_size * 2;
2054 	iov_write[1].iov_base = payload_read + cluster_size * 2;
2055 	iov_write[1].iov_len = cluster_size * 3;
2056 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
2057 	poll_threads();
2058 	CU_ASSERT(g_bserrno == 0);
2059 
2060 	memset(payload_read, 0xFF, payload_size);
2061 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
2062 	poll_threads();
2063 	CU_ASSERT(g_bserrno == 0);
2064 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
2065 
2066 	spdk_bs_free_io_channel(channel);
2067 	poll_threads();
2068 
2069 	g_blob = NULL;
2070 	g_blobid = 0;
2071 
2072 	free(payload_read);
2073 	free(payload_write);
2074 	free(payload_pattern);
2075 
2076 	ut_blob_close_and_delete(bs, blob);
2077 }
2078 
/*
 * Verify cluster unmap behavior on resize-to-zero: clusters still recorded
 * as allocated in the blob metadata must be zeroed on the device, while
 * clusters manually marked unallocated must be left untouched.
 */
static void
blob_unmap(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint8_t payload[4096];
	int i;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);

	/* Fully allocate all 10 clusters. */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	memset(payload, 0, sizeof(payload));
	payload[0] = 0xFF;

	/*
	 * Set first byte of every cluster to 0xFF by poking the backing device
	 * buffer directly (bypassing blob I/O).
	 * First cluster on device is reserved so let's start from cluster number 1
	 */
	for (i = 1; i < 11; i++) {
		g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
	}

	/* Confirm writes: read the first 4 KiB page of each blob cluster and
	 * check the marker byte is visible through the blob. */
	for (i = 0; i < 10; i++) {
		payload[0] = 0;
		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
				  blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(payload[0] == 0xFF);
	}

	/* Mark some clusters as unallocated directly in the metadata, so the
	 * resize below must NOT unmap them on the device. */
	blob->active.clusters[1] = 0;
	blob->active.clusters[2] = 0;
	blob->active.clusters[3] = 0;
	blob->active.clusters[6] = 0;
	blob->active.clusters[8] = 0;
	blob->active.num_allocated_clusters -= 5;

	/* Unmap clusters by resizing to 0 */
	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Confirm that only 'allocated' clusters were unmapped: device cluster
	 * i+1 backs blob cluster i, so the clusters cleared above (1,2,3,6,8)
	 * correspond to device clusters 2,3,4,7,9 and must keep their marker. */
	for (i = 1; i < 11; i++) {
		switch (i) {
		case 2:
		case 3:
		case 4:
		case 7:
		case 9:
			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
			break;
		default:
			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
			break;
		}
	}

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
2162 
2163 static void
2164 blob_iter(void)
2165 {
2166 	struct spdk_blob_store *bs = g_bs;
2167 	struct spdk_blob *blob;
2168 	spdk_blob_id blobid;
2169 	struct spdk_blob_opts blob_opts;
2170 
2171 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2172 	poll_threads();
2173 	CU_ASSERT(g_blob == NULL);
2174 	CU_ASSERT(g_bserrno == -ENOENT);
2175 
2176 	ut_spdk_blob_opts_init(&blob_opts);
2177 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2178 	poll_threads();
2179 	CU_ASSERT(g_bserrno == 0);
2180 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2181 	blobid = g_blobid;
2182 
2183 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2184 	poll_threads();
2185 	CU_ASSERT(g_blob != NULL);
2186 	CU_ASSERT(g_bserrno == 0);
2187 	blob = g_blob;
2188 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
2189 
2190 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2191 	poll_threads();
2192 	CU_ASSERT(g_blob == NULL);
2193 	CU_ASSERT(g_bserrno == -ENOENT);
2194 }
2195 
/*
 * Exercise the public and internal xattr APIs on a single blob:
 * set/get/overwrite/remove, md_ro enforcement, name enumeration, the
 * SPDK_BLOB_INTERNAL_XATTR invalid-flags bit, and persistence of internal
 * xattrs across a blobstore reload.
 */
static void
blob_xattr(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	spdk_blob_id blobid = spdk_blob_get_id(blob);
	uint64_t length;
	int rc;
	const char *name1, *name2;
	const void *value;
	size_t value_len;
	struct spdk_xattr_names *names;

	/* Test that set_xattr fails if md_ro flag is set. */
	blob->md_ro = true;
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == -EPERM);

	blob->md_ro = false;
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Overwrite "length" xattr. */
	length = 3456;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* get_xattr should still work even if md_ro flag is set. */
	value = NULL;
	blob->md_ro = true;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	blob->md_ro = false;

	/* Getting a nonexistent xattr fails. */
	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	/* Enumerate names; enumeration order is not asserted, so accept either
	 * ordering of the two names. */
	names = NULL;
	rc = spdk_blob_get_xattr_names(blob, &names);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(names != NULL);
	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
	name1 = spdk_xattr_names_get_name(names, 0);
	SPDK_CU_ASSERT_FATAL(name1 != NULL);
	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
	name2 = spdk_xattr_names_get_name(names, 1);
	SPDK_CU_ASSERT_FATAL(name2 != NULL);
	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
	CU_ASSERT(strcmp(name1, name2));
	spdk_xattr_names_free(names);

	/* Confirm that remove_xattr fails if md_ro is set to true. */
	blob->md_ro = true;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == -EPERM);

	blob->md_ro = false;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == 0);

	rc = spdk_blob_remove_xattr(blob, "foobar");
	CU_ASSERT(rc == -ENOENT);

	/* Set internal xattr; internal and public namespaces are distinct,
	 * so a public lookup of the same name must fail. */
	length = 7898;
	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
	CU_ASSERT(rc == 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(*(uint64_t *)value == length);
	/* try to get public xattr with same name */
	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
	CU_ASSERT(rc != 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
	CU_ASSERT(rc != 0);
	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
		  SPDK_BLOB_INTERNAL_XATTR);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();

	/* Check if xattrs are persisted */
	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(*(uint64_t *)value == length);

	/* try to get internal xattr through public call */
	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
	CU_ASSERT(rc != 0);

	/* Removing the last internal xattr must clear the flag bit. */
	rc = blob_remove_xattr(blob, "internal", true);
	CU_ASSERT(rc == 0);

	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
	/* NOTE(review): the reopened blob is left open here — presumably
	 * released by the test suite teardown; confirm. */
}
2307 
2308 static void
2309 blob_parse_md(void)
2310 {
2311 	struct spdk_blob_store *bs = g_bs;
2312 	struct spdk_blob *blob;
2313 	int rc;
2314 	uint32_t used_pages;
2315 	size_t xattr_length;
2316 	char *xattr;
2317 
2318 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2319 	blob = ut_blob_create_and_open(bs, NULL);
2320 
2321 	/* Create large extent to force more than 1 page of metadata. */
2322 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2323 		       strlen("large_xattr");
2324 	xattr = calloc(xattr_length, sizeof(char));
2325 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2326 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2327 	free(xattr);
2328 	SPDK_CU_ASSERT_FATAL(rc == 0);
2329 
2330 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2331 	poll_threads();
2332 
2333 	/* Delete the blob and verify that number of pages returned to before its creation. */
2334 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2335 	ut_blob_close_and_delete(bs, blob);
2336 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2337 }
2338 
/*
 * End-to-end blobstore persistence test: initialize a store, create a blob
 * with xattrs, unload, reload, and verify contents.  Also covers load
 * failures (unsupported blocklen, zeroed op limits, shrunken bdev) and the
 * compatibility path where super_block->size == 0.
 */
static void
bs_load(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	spdk_blob_id blobid;
	struct spdk_blob *blob;
	struct spdk_bs_super_block *super_block;
	uint64_t length;
	int rc;
	const void *value;
	size_t value_len;
	struct spdk_bs_opts opts;
	struct spdk_blob_opts blob_opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Try to open a blobid that does not exist */
	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blob == NULL);

	/* Create a blob */
	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	/* Try again to open valid blob but without the upper bit set */
	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blob == NULL);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	/* A clean unload must leave the on-disk super block marked clean. */
	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);

	/* Load should fail for device with an unsupported blocklen */
	dev = init_dev();
	dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_md_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.max_md_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_channel_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.max_channel_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load an existing blob store */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);
	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Verify that blobstore is marked dirty after first metadata sync.
	 * NOTE(review): the sync has only been issued here (no poll_threads()
	 * yet), so the on-disk super block is still clean at this point —
	 * confirm this is the intended check. */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	CU_ASSERT(super_block->clean == 1);

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);

	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Load should fail: bdev size < saved size */
	dev = init_dev();
	dev->blockcnt /= 2;

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();

	CU_ASSERT(g_bserrno == -EILSEQ);

	/* Load should succeed: bdev size > saved size */
	dev = init_dev();
	dev->blockcnt *= 4;

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();


	/* Test compatibility mode: a super block with size == 0 (written by
	 * older blobstores) should be accepted and the size filled in. */

	dev = init_dev();
	super_block->size = 0;
	super_block->crc = blob_md_page_calc_crc(super_block);

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Blobstore should update number of blocks in super_block */
	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
	CU_ASSERT(super_block->clean == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(super_block->clean == 1);
	g_bs = NULL;

}
2544 
/*
 * Verify handling of the SNAPSHOT_PENDING_REMOVAL xattr across blobstore
 * reloads: a snapshot still referenced by its clone survives the reload (and
 * the pending-removal marker is cleared), while one that is no longer
 * referenced is deleted during load.
 */
static void
bs_load_pending_removal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Set SNAPSHOT_PENDING_REMOVAL xattr; md_ro is toggled off because
	 * snapshots are metadata read-only and the internal setter enforces it. */
	snapshot->md_ro = false;
	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
	CU_ASSERT(rc == 0);
	snapshot->md_ro = true;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload blobstore */
	ut_bs_reload(&bs, NULL);

	/* Snapshot should not be removed as blob is still pointing to it */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
	CU_ASSERT(rc != 0);

	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
	snapshot->md_ro = false;
	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
	CU_ASSERT(rc == 0);
	snapshot->md_ro = true;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr, so the
	 * snapshot becomes unreferenced. */
	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload blobstore */
	ut_bs_reload(&bs, NULL);

	/* Snapshot should be removed as blob is not pointing to it anymore */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
}
2639 
2640 static void
2641 bs_load_custom_cluster_size(void)
2642 {
2643 	struct spdk_blob_store *bs;
2644 	struct spdk_bs_dev *dev;
2645 	struct spdk_bs_super_block *super_block;
2646 	struct spdk_bs_opts opts;
2647 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2648 	uint32_t cluster_sz;
2649 	uint64_t total_clusters;
2650 
2651 	dev = init_dev();
2652 	spdk_bs_opts_init(&opts, sizeof(opts));
2653 	opts.cluster_sz = custom_cluster_size;
2654 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2655 
2656 	/* Initialize a new blob store */
2657 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2658 	poll_threads();
2659 	CU_ASSERT(g_bserrno == 0);
2660 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2661 	bs = g_bs;
2662 	cluster_sz = bs->cluster_sz;
2663 	total_clusters = bs->total_clusters;
2664 
2665 	/* Unload the blob store */
2666 	spdk_bs_unload(bs, bs_op_complete, NULL);
2667 	poll_threads();
2668 	CU_ASSERT(g_bserrno == 0);
2669 	g_bs = NULL;
2670 	g_blob = NULL;
2671 	g_blobid = 0;
2672 
2673 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2674 	CU_ASSERT(super_block->clean == 1);
2675 
2676 	/* Load an existing blob store */
2677 	dev = init_dev();
2678 	spdk_bs_opts_init(&opts, sizeof(opts));
2679 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2680 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2681 	poll_threads();
2682 	CU_ASSERT(g_bserrno == 0);
2683 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2684 	bs = g_bs;
2685 	/* Compare cluster size and number to one after initialization */
2686 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2687 	CU_ASSERT(total_clusters == bs->total_clusters);
2688 
2689 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2690 	CU_ASSERT(super_block->clean == 1);
2691 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2692 
2693 	spdk_bs_unload(bs, bs_op_complete, NULL);
2694 	poll_threads();
2695 	CU_ASSERT(g_bserrno == 0);
2696 	CU_ASSERT(super_block->clean == 1);
2697 	g_bs = NULL;
2698 }
2699 
/*
 * Simulate a spdk_bs_grow() that was interrupted after extending the
 * used-cluster bitmap on disk but before updating the super block, then
 * verify the blobstore loads with its original capacity and that both the
 * blob and its snapshot remain intact.
 */
static void
bs_load_after_failed_grow(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask *mask;
	struct spdk_blob_opts blob_opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	uint64_t total_data_clusters;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	/*
	 * The bdev_size is 64M, cluster_sz is 1M, so there are 64 clusters. The
	 * blobstore will create 64 md pages by default. We set num_md_pages to 128,
	 * thus the blobstore could grow to the double size.
	 */
	opts.num_md_pages = 128;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create blob */
	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &blob_opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* The blob's 10 clusters are the only ones consumed. */
	total_data_clusters = bs->total_data_clusters;
	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);

	/* Locate the persisted used-cluster mask; 4096 here matches the md
	 * page size assumed throughout these tests (used_cluster_mask_start
	 * is in pages). */
	mask = (struct spdk_bs_md_mask *)(g_dev_buffer + super_block->used_cluster_mask_start * 4096);
	CU_ASSERT(mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask->length == super_block->size / super_block->cluster_size);

	/*
	 * We change the mask->length to emulate this scenario: A spdk_bs_grow failed after it changed
	 * the used_cluster bitmap length, but it didn't change the super block yet.
	 */
	mask->length *= 2;

	/* Load an existing blob store; the device itself is also doubled so
	 * the oversized mask is plausible. */
	dev = init_dev();
	dev->blockcnt *= 2;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Check the capacity is the same as before */
	CU_ASSERT(bs->total_data_clusters == total_data_clusters);
	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);

	/* Check the blob and the snapshot are still available */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(super_block->clean == 1);
	g_bs = NULL;
}
2825 
2826 static void
2827 bs_type(void)
2828 {
2829 	struct spdk_blob_store *bs;
2830 	struct spdk_bs_dev *dev;
2831 	struct spdk_bs_opts opts;
2832 
2833 	dev = init_dev();
2834 	spdk_bs_opts_init(&opts, sizeof(opts));
2835 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2836 
2837 	/* Initialize a new blob store */
2838 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2839 	poll_threads();
2840 	CU_ASSERT(g_bserrno == 0);
2841 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2842 	bs = g_bs;
2843 
2844 	/* Unload the blob store */
2845 	spdk_bs_unload(bs, bs_op_complete, NULL);
2846 	poll_threads();
2847 	CU_ASSERT(g_bserrno == 0);
2848 	g_bs = NULL;
2849 	g_blob = NULL;
2850 	g_blobid = 0;
2851 
2852 	/* Load non existing blobstore type */
2853 	dev = init_dev();
2854 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2855 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2856 	poll_threads();
2857 	CU_ASSERT(g_bserrno != 0);
2858 
2859 	/* Load with empty blobstore type */
2860 	dev = init_dev();
2861 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2862 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2863 	poll_threads();
2864 	CU_ASSERT(g_bserrno == 0);
2865 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2866 	bs = g_bs;
2867 
2868 	spdk_bs_unload(bs, bs_op_complete, NULL);
2869 	poll_threads();
2870 	CU_ASSERT(g_bserrno == 0);
2871 	g_bs = NULL;
2872 
2873 	/* Initialize a new blob store with empty bstype */
2874 	dev = init_dev();
2875 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2876 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2877 	poll_threads();
2878 	CU_ASSERT(g_bserrno == 0);
2879 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2880 	bs = g_bs;
2881 
2882 	spdk_bs_unload(bs, bs_op_complete, NULL);
2883 	poll_threads();
2884 	CU_ASSERT(g_bserrno == 0);
2885 	g_bs = NULL;
2886 
2887 	/* Load non existing blobstore type */
2888 	dev = init_dev();
2889 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2890 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2891 	poll_threads();
2892 	CU_ASSERT(g_bserrno != 0);
2893 
2894 	/* Load with empty blobstore type */
2895 	dev = init_dev();
2896 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2897 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2898 	poll_threads();
2899 	CU_ASSERT(g_bserrno == 0);
2900 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2901 	bs = g_bs;
2902 
2903 	spdk_bs_unload(bs, bs_op_complete, NULL);
2904 	poll_threads();
2905 	CU_ASSERT(g_bserrno == 0);
2906 	g_bs = NULL;
2907 }
2908 
/*
 * Exercise super block version handling on load: a version newer than the
 * library supports must be rejected, while a hand-crafted version-1 super
 * block must still load (backward compatibility with old on-disk layouts).
 */
static void
bs_super_block(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_super_block_ver1 super_block_v1;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	/* Load an existing blob store with version newer than supported.
	 * g_dev_buffer is the raw backing storage, so the on-disk super
	 * block can be edited in place. */
	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	super_block->version++;

	dev = init_dev();
	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	/* Create a new blob store with super block version 1, written directly
	 * into g_dev_buffer. Field layout mirrors struct
	 * spdk_bs_super_block_ver1 defined at the top of this file. */
	dev = init_dev();
	super_block_v1.version = 1;
	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
	super_block_v1.length = 0x1000;
	super_block_v1.clean = 1;
	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
	super_block_v1.cluster_size = 0x100000;
	super_block_v1.used_page_mask_start = 0x01;
	super_block_v1.used_page_mask_len = 0x01;
	super_block_v1.used_cluster_mask_start = 0x02;
	super_block_v1.used_cluster_mask_len = 0x01;
	super_block_v1.md_start = 0x03;
	super_block_v1.md_len = 0x40;
	/* 4036 == sizeof(super_block_v1.reserved); zeroed so the CRC below is
	 * computed over deterministic contents */
	memset(super_block_v1.reserved, 0, 4036);
	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));

	/* Empty bstype in opts: the load accepts whatever bstype is on disk */
	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
2977 
/*
 * Write an unclean (clean == 0) super block directly into g_dev_buffer and
 * load it, so the blobstore must recover its used-cluster accounting rather
 * than trusting masks from a clean shutdown.
 */
static void
bs_test_recover_cluster_count(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Hand-craft a version-3 super block; clean == 0 marks a dirty shutdown */
	super_block.version = 3;
	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
	super_block.length = 0x1000;
	super_block.clean = 0;
	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
	super_block.cluster_size = 4096;
	super_block.used_page_mask_start = 0x01;
	super_block.used_page_mask_len = 0x01;
	super_block.used_cluster_mask_start = 0x02;
	super_block.used_cluster_mask_len = 0x01;
	super_block.used_blobid_mask_start = 0x03;
	super_block.used_blobid_mask_len = 0x01;
	super_block.md_start = 0x04;
	super_block.md_len = 0x40;
	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
	super_block.size = dev->blockcnt * dev->blocklen;
	super_block.io_unit_size = 0x1000;
	/* zero the reserved area; 4000 presumably matches
	 * sizeof(super_block.reserved) in the v3 layout — defined in blobstore.h */
	memset(super_block.reserved, 0, 4000);
	super_block.crc = blob_md_page_calc_crc(&super_block);
	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));

	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	/* With 4KiB clusters every metadata page occupies one cluster, so after
	 * recovery everything past the end of the md region (md_start + md_len)
	 * should be free */
	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
			super_block.md_len));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3025 
/*
 * Core of the bs_grow_live tests: initialize a blobstore on the default dev,
 * resize the underlying dev to new_blockcnt blocks, run spdk_bs_grow_live and
 * verify the data cluster count plus the on-disk super block and used_cluster
 * mask, both before and after the first unload.
 *
 * \param new_blockcnt New size of the backing dev, in blocks.
 */
static void
bs_grow_live_size(uint64_t new_blockcnt)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size;
	uint64_t total_data_clusters;

	/*
	 * Further down the test the dev size will be larger than the g_dev_buffer size,
	 * so we set clear_method to NONE, or the blobstore will try to clear the dev and
	 * will write beyond the end of g_dev_buffer.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == 63);

	/*
	 * Set the dev size according to the new_blockcnt,
	 * then the blobstore will adjust the metadata according to the new size.
	 */
	dev->blockcnt = new_blockcnt;
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	total_data_clusters = spdk_bs_total_data_cluster_count(bs);
	/* One cluster of 1MiB size is used for metadata */
	CU_ASSERT(total_data_clusters == (bdev_size / (1 * 1024 * 1024)) - 1);

	/* Make sure the super block is updated. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 0);
	/* The used_cluster mask is not written out until first spdk_bs_unload. */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == 0);
	CU_ASSERT(mask.length == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Make sure all metadata is correct, super block and used_cluster mask. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 1);
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	/* Load blobstore and check the cluster counts again. */
	dev = init_dev();
	dev->blockcnt = new_blockcnt;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	CU_ASSERT(super_block.clean == 1);
	bs = g_bs;
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));

	/* Perform grow without change in size, expected pass. */
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 1);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3114 
3115 static void
3116 bs_grow_live(void)
3117 {
3118 	/* No change expected */
3119 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT);
3120 
3121 	/* Size slightly increased, but not enough to increase cluster count */
3122 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT + 1);
3123 
3124 	/* Size doubled, increasing the cluster count */
3125 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT * 2);
3126 }
3127 
/*
 * Negative tests for spdk_bs_grow_live: shrinking the dev must be rejected
 * with -EILSEQ (super block validation), and growing beyond what the single
 * reserved used_cluster mask page can track must fail with -ENOSPC.  In both
 * cases the blobstore metadata must remain untouched.
 */
static void
bs_grow_live_no_space(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size_init;
	uint64_t total_data_clusters, max_clusters;

	/*
	 * Further down the test the dev size will be larger than the g_dev_buffer size,
	 * so we set clear_method to NONE, or the blobstore will try to clear the dev and
	 * will write beyond the end of g_dev_buffer.
	 */
	dev = init_dev();
	bdev_size_init = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	total_data_clusters = spdk_bs_total_data_cluster_count(bs);
	CU_ASSERT(total_data_clusters == 63);

	/*
	 * The default dev size is 64M, here we set the dev size to 32M,
	 * expecting EILSEQ due to super_block validation and no change in blobstore.
	 */
	dev->blockcnt = (32L * 1024L * 1024L) / dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	/* This error code comes from bs_super_validate() */
	CU_ASSERT(g_bserrno == -EILSEQ);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);

	/*
	 * Blobstore in this test has only space for single md_page for used_clusters,
	 * which fits 1 bit per cluster minus the md header.
	 *
	 * Dev size is increased to exceed the reserved space for the used_cluster_mask
	 * in the metadata, expecting ENOSPC and no change in blobstore.
	 */
	max_clusters = (spdk_bs_get_page_size(bs) - sizeof(struct spdk_bs_md_mask)) * 8;
	max_clusters += 1;
	dev->blockcnt = (max_clusters * spdk_bs_get_cluster_size(bs)) / dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);

	/*
	 * No change should have occurred for the duration of the test,
	 * unload blobstore and check metadata.
	 */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Make sure all metadata is correct, super block and used_cluster mask. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);
	CU_ASSERT(super_block.clean == 1);
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size_init / (1 * 1024 * 1024));

	/* Load blobstore and check the cluster counts again. */
	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3218 
/*
 * Verify offline grow via spdk_bs_grow: init on the default 64MiB dev, unload,
 * then re-open with spdk_bs_grow on a 128MiB dev and check that the super
 * block size and the used_cluster mask reflect the new capacity.
 */
static void
bs_test_grow(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size;

	dev = init_dev();
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/*
	 * To make sure all the metadata are updated to the disk,
	 * we check the g_dev_buffer after spdk_bs_unload.
	 */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);

	/*
	 * Make sure the used_cluster mask is correct.
	 */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	/*
	 * The default dev size is 64M, here we set the dev size to 128M,
	 * then the blobstore will adjust the metadata according to the new size.
	 * The dev size is larger than the g_dev_buffer size, so we set clear_method
	 * to NONE, or the blobstore will try to clear the dev and will write beyond
	 * the end of g_dev_buffer.
	 */
	dev = init_dev();
	dev->blockcnt = (128L * 1024L * 1024L) / dev->blocklen;
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_grow(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/*
	 * After spdk_bs_grow, all metadata are updated to the disk.
	 * So we can check g_dev_buffer now.
	 */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);

	/*
	 * Make sure the used_cluster mask has been updated according to the bdev size
	 */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3296 
3297 /*
3298  * Create a blobstore and then unload it.
3299  */
3300 static void
3301 bs_unload(void)
3302 {
3303 	struct spdk_blob_store *bs = g_bs;
3304 	struct spdk_blob *blob;
3305 
3306 	/* Create a blob and open it. */
3307 	blob = ut_blob_create_and_open(bs, NULL);
3308 
3309 	/* Try to unload blobstore, should fail with open blob */
3310 	g_bserrno = -1;
3311 	spdk_bs_unload(bs, bs_op_complete, NULL);
3312 	poll_threads();
3313 	CU_ASSERT(g_bserrno == -EBUSY);
3314 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3315 
3316 	/* Close the blob, then successfully unload blobstore */
3317 	g_bserrno = -1;
3318 	spdk_blob_close(blob, blob_op_complete, NULL);
3319 	poll_threads();
3320 	CU_ASSERT(g_bserrno == 0);
3321 }
3322 
3323 /*
3324  * Create a blobstore with a cluster size different than the default, and ensure it is
3325  *  persisted.
3326  */
3327 static void
3328 bs_cluster_sz(void)
3329 {
3330 	struct spdk_blob_store *bs;
3331 	struct spdk_bs_dev *dev;
3332 	struct spdk_bs_opts opts;
3333 	uint32_t cluster_sz;
3334 
3335 	/* Set cluster size to zero */
3336 	dev = init_dev();
3337 	spdk_bs_opts_init(&opts, sizeof(opts));
3338 	opts.cluster_sz = 0;
3339 
3340 	/* Initialize a new blob store */
3341 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3342 	poll_threads();
3343 	CU_ASSERT(g_bserrno == -EINVAL);
3344 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3345 
3346 	/*
3347 	 * Set cluster size to blobstore page size,
3348 	 * to work it is required to be at least twice the blobstore page size.
3349 	 */
3350 	dev = init_dev();
3351 	spdk_bs_opts_init(&opts, sizeof(opts));
3352 	opts.cluster_sz = SPDK_BS_PAGE_SIZE;
3353 
3354 	/* Initialize a new blob store */
3355 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3356 	poll_threads();
3357 	CU_ASSERT(g_bserrno == -ENOMEM);
3358 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3359 
3360 	/*
3361 	 * Set cluster size to lower than page size,
3362 	 * to work it is required to be at least twice the blobstore page size.
3363 	 */
3364 	dev = init_dev();
3365 	spdk_bs_opts_init(&opts, sizeof(opts));
3366 	opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
3367 
3368 	/* Initialize a new blob store */
3369 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3370 	poll_threads();
3371 	CU_ASSERT(g_bserrno == -EINVAL);
3372 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3373 
3374 	/* Set cluster size to twice the default */
3375 	dev = init_dev();
3376 	spdk_bs_opts_init(&opts, sizeof(opts));
3377 	opts.cluster_sz *= 2;
3378 	cluster_sz = opts.cluster_sz;
3379 
3380 	/* Initialize a new blob store */
3381 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3382 	poll_threads();
3383 	CU_ASSERT(g_bserrno == 0);
3384 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3385 	bs = g_bs;
3386 
3387 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3388 
3389 	ut_bs_reload(&bs, &opts);
3390 
3391 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3392 
3393 	spdk_bs_unload(bs, bs_op_complete, NULL);
3394 	poll_threads();
3395 	CU_ASSERT(g_bserrno == 0);
3396 	g_bs = NULL;
3397 }
3398 
3399 /*
3400  * Create a blobstore, reload it and ensure total usable cluster count
3401  *  stays the same.
3402  */
3403 static void
3404 bs_usable_clusters(void)
3405 {
3406 	struct spdk_blob_store *bs = g_bs;
3407 	struct spdk_blob *blob;
3408 	uint32_t clusters;
3409 	int i;
3410 
3411 
3412 	clusters = spdk_bs_total_data_cluster_count(bs);
3413 
3414 	ut_bs_reload(&bs, NULL);
3415 
3416 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3417 
3418 	/* Create and resize blobs to make sure that usable cluster count won't change */
3419 	for (i = 0; i < 4; i++) {
3420 		g_bserrno = -1;
3421 		g_blobid = SPDK_BLOBID_INVALID;
3422 		blob = ut_blob_create_and_open(bs, NULL);
3423 
3424 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3425 		poll_threads();
3426 		CU_ASSERT(g_bserrno == 0);
3427 
3428 		g_bserrno = -1;
3429 		spdk_blob_close(blob, blob_op_complete, NULL);
3430 		poll_threads();
3431 		CU_ASSERT(g_bserrno == 0);
3432 
3433 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3434 	}
3435 
3436 	/* Reload the blob store to make sure that nothing changed */
3437 	ut_bs_reload(&bs, NULL);
3438 
3439 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3440 }
3441 
3442 /*
3443  * Test resizing of the metadata blob.  This requires creating enough blobs
3444  *  so that one cluster is not enough to fit the metadata for those blobs.
3445  *  To induce this condition to happen more quickly, we reduce the cluster
3446  *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
3447  */
3448 static void
3449 bs_resize_md(void)
3450 {
3451 	struct spdk_blob_store *bs;
3452 	const int CLUSTER_PAGE_COUNT = 4;
3453 	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
3454 	struct spdk_bs_dev *dev;
3455 	struct spdk_bs_opts opts;
3456 	struct spdk_blob *blob;
3457 	struct spdk_blob_opts blob_opts;
3458 	uint32_t cluster_sz;
3459 	spdk_blob_id blobids[NUM_BLOBS];
3460 	int i;
3461 
3462 
3463 	dev = init_dev();
3464 	spdk_bs_opts_init(&opts, sizeof(opts));
3465 	opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
3466 	cluster_sz = opts.cluster_sz;
3467 
3468 	/* Initialize a new blob store */
3469 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3470 	poll_threads();
3471 	CU_ASSERT(g_bserrno == 0);
3472 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3473 	bs = g_bs;
3474 
3475 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3476 
3477 	ut_spdk_blob_opts_init(&blob_opts);
3478 
3479 	for (i = 0; i < NUM_BLOBS; i++) {
3480 		g_bserrno = -1;
3481 		g_blobid = SPDK_BLOBID_INVALID;
3482 		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3483 		poll_threads();
3484 		CU_ASSERT(g_bserrno == 0);
3485 		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
3486 		blobids[i] = g_blobid;
3487 	}
3488 
3489 	ut_bs_reload(&bs, &opts);
3490 
3491 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3492 
3493 	for (i = 0; i < NUM_BLOBS; i++) {
3494 		g_bserrno = -1;
3495 		g_blob = NULL;
3496 		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
3497 		poll_threads();
3498 		CU_ASSERT(g_bserrno == 0);
3499 		CU_ASSERT(g_blob !=  NULL);
3500 		blob = g_blob;
3501 		g_bserrno = -1;
3502 		spdk_blob_close(blob, blob_op_complete, NULL);
3503 		poll_threads();
3504 		CU_ASSERT(g_bserrno == 0);
3505 	}
3506 
3507 	spdk_bs_unload(bs, bs_op_complete, NULL);
3508 	poll_threads();
3509 	CU_ASSERT(g_bserrno == 0);
3510 	g_bs = NULL;
3511 }
3512 
3513 static void
3514 bs_destroy(void)
3515 {
3516 	struct spdk_blob_store *bs;
3517 	struct spdk_bs_dev *dev;
3518 
3519 	/* Initialize a new blob store */
3520 	dev = init_dev();
3521 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3522 	poll_threads();
3523 	CU_ASSERT(g_bserrno == 0);
3524 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3525 	bs = g_bs;
3526 
3527 	/* Destroy the blob store */
3528 	g_bserrno = -1;
3529 	spdk_bs_destroy(bs, bs_op_complete, NULL);
3530 	poll_threads();
3531 	CU_ASSERT(g_bserrno == 0);
3532 
3533 	/* Loading an non-existent blob store should fail. */
3534 	g_bs = NULL;
3535 	dev = init_dev();
3536 
3537 	g_bserrno = 0;
3538 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3539 	poll_threads();
3540 	CU_ASSERT(g_bserrno != 0);
3541 }
3542 
3543 /* Try to hit all of the corner cases associated with serializing
3544  * a blob to disk
3545  */
static void
blob_serialize_test(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts opts;
	struct spdk_blob_store *bs;
	spdk_blob_id blobid[2];
	struct spdk_blob *blob[2];
	uint64_t i;
	char *value;
	int rc;

	dev = init_dev();

	/* Initialize a new blobstore with very small clusters */
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz = dev->blocklen * 8;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create and open two blobs */
	for (i = 0; i < 2; i++) {
		blob[i] = ut_blob_create_and_open(bs, NULL);
		blobid[i] = spdk_blob_get_id(blob[i]);

		/* Set a fairly large xattr on both blobs to eat up
		 * metadata space
		 */
		value = calloc(dev->blocklen - 64, sizeof(char));
		SPDK_CU_ASSERT_FATAL(value != NULL);
		/* Only the first half is filled with the blob index; calloc
		 * left the remainder zeroed */
		memset(value, i, dev->blocklen / 2);
		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
		CU_ASSERT(rc == 0);
		free(value);
	}

	/* Resize the blobs, alternating 1 cluster at a time.
	 * This thwarts run length encoding and will cause spill
	 * over of the extents.
	 */
	for (i = 0; i < 6; i++) {
		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	/* Persist the metadata of both blobs before closing them */
	for (i = 0; i < 2; i++) {
		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	/* Close the blobs */
	for (i = 0; i < 2; i++) {
		spdk_blob_close(blob[i], blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	/* Reload and verify both blobs kept all 3 clusters through the
	 * serialize/deserialize round trip */
	ut_bs_reload(&bs, &opts);

	for (i = 0; i < 2; i++) {
		blob[i] = NULL;

		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blob != NULL);
		blob[i] = g_blob;

		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);

		spdk_blob_close(blob[i], blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3631 
3632 static void
3633 blob_crc(void)
3634 {
3635 	struct spdk_blob_store *bs = g_bs;
3636 	struct spdk_blob *blob;
3637 	spdk_blob_id blobid;
3638 	uint32_t page_num;
3639 	int index;
3640 	struct spdk_blob_md_page *page;
3641 
3642 	blob = ut_blob_create_and_open(bs, NULL);
3643 	blobid = spdk_blob_get_id(blob);
3644 
3645 	spdk_blob_close(blob, blob_op_complete, NULL);
3646 	poll_threads();
3647 	CU_ASSERT(g_bserrno == 0);
3648 
3649 	page_num = bs_blobid_to_page(blobid);
3650 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3651 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3652 	page->crc = 0;
3653 
3654 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3655 	poll_threads();
3656 	CU_ASSERT(g_bserrno == -EINVAL);
3657 	CU_ASSERT(g_blob == NULL);
3658 	g_bserrno = 0;
3659 
3660 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
3661 	poll_threads();
3662 	CU_ASSERT(g_bserrno == -EINVAL);
3663 }
3664 
3665 static void
3666 super_block_crc(void)
3667 {
3668 	struct spdk_blob_store *bs;
3669 	struct spdk_bs_dev *dev;
3670 	struct spdk_bs_super_block *super_block;
3671 
3672 	dev = init_dev();
3673 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3674 	poll_threads();
3675 	CU_ASSERT(g_bserrno == 0);
3676 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3677 	bs = g_bs;
3678 
3679 	spdk_bs_unload(bs, bs_op_complete, NULL);
3680 	poll_threads();
3681 	CU_ASSERT(g_bserrno == 0);
3682 	g_bs = NULL;
3683 
3684 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
3685 	super_block->crc = 0;
3686 	dev = init_dev();
3687 
3688 	/* Load an existing blob store */
3689 	g_bserrno = 0;
3690 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3691 	poll_threads();
3692 	CU_ASSERT(g_bserrno == -EILSEQ);
3693 }
3694 
3695 /* For blob dirty shutdown test case we do the following sub-test cases:
3696  * 1 Initialize new blob store and create 1 super blob with some xattrs, then we
3697  *   dirty shutdown and reload the blob store and verify the xattrs.
3698  * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown,
3699  *   reload the blob store and verify the clusters number.
3700  * 3 Create the second blob and then dirty shutdown, reload the blob store
3701  *   and verify the second blob.
3702  * 4 Delete the second blob and then dirty shutdown, reload the blob store
3703  *   and verify the second blob is invalid.
3704  * 5 Create the second blob again and also create the third blob, modify the
3705  *   md of second blob which makes the md invalid, and then dirty shutdown,
 *   reload the blob store and verify the second blob, which should be invalid,
 *   and also verify the third blob, which should be correct.
3708  */
static void
blob_dirty_shutdown(void)
{
	int rc;
	int index;
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid1, blobid2, blobid3;
	struct spdk_blob *blob = g_blob;
	uint64_t length;
	uint64_t free_clusters;
	const void *value;
	size_t value_len;
	uint32_t page_num;
	struct spdk_blob_md_page *page;
	struct spdk_blob_opts blob_opts;

	/* Create first blob */
	blobid1 = spdk_blob_get_id(blob);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Put xattr that fits exactly single page.
	 * This results in adding additional pages to MD.
	 * First is flags and smaller xattr, second the large xattr,
	 * third are just the extents.
	 */
	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
			      strlen("large_xattr");
	char *xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);
	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	free(xattr);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Dirty shutdown: reload without a clean unload, forcing the blobstore
	 * to recover its state from the on-disk metadata pages. */
	ut_bs_dirty_load(&bs, NULL);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid1 == g_blobid);

	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Get the xattrs - they must have survived the dirty shutdown. */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Resize the blob */
	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Dirty shutdown again - the resize to 20 clusters must persist. */
	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Create second blob */
	blob = ut_blob_create_and_open(bs, NULL);
	blobid2 = spdk_blob_get_id(blob);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	ut_blob_close_and_delete(bs, blob);

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* After deletion followed by a dirty shutdown, the second blob must
	 * no longer be openable, while the first blob is unaffected. */
	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, NULL);

	/* Create second blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid2 = g_blobid;

	/* Create third blob */
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid3 = g_blobid;

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Set some xattrs for second blob */
	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Set some xattrs for third blob */
	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Mark second blob as invalid by corrupting its first md page directly
	 * in the backing buffer: a non-zero sequence_num on the first page is
	 * rejected when the blob is parsed on load.  The CRC is recomputed so
	 * the page fails on the sequence number rather than the CRC check.
	 * NOTE(review): relies on md page layout/validation order - confirm
	 * against blob md parsing in blobstore.c. */
	page_num = bs_blobid_to_page(blobid2);

	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
	page->sequence_num = 1;
	page->crc = blob_md_page_calc_crc(page);

	free_clusters = spdk_bs_free_cluster_count(bs);

	ut_bs_dirty_load(&bs, NULL);

	/* The corrupted second blob must fail to open... */
	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	/* ...while the untouched third blob still opens correctly. */
	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
}
3973 
/* Verify handling of unknown bits in the three md flag categories:
 * an unknown invalid_flag makes the blob unopenable, an unknown data_ro
 * flag forces the blob data- and md-read-only, and an unknown md_ro flag
 * forces only the md read-only. */
static void
blob_flags(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
	struct spdk_blob_opts blob_opts;
	int rc;

	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
	blob_invalid = ut_blob_create_and_open(bs, NULL);
	blobid_invalid = spdk_blob_get_id(blob_invalid);

	blob_data_ro = ut_blob_create_and_open(bs, NULL);
	blobid_data_ro = spdk_blob_get_id(blob_data_ro);

	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
	/* The clear method is stored in the md_ro flags. */
	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);

	/* Change the size of blob_data_ro to check if flags are serialized
	 * when blob has non zero number of extents */
	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Set the xattr to check if flags are serialized
	 * when blob has non zero number of xattrs */
	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	/* Set a high, not-yet-defined bit in each flag category and mark the
	 * blobs dirty so the bits get written out on the next md sync. */
	blob_invalid->invalid_flags = (1ULL << 63);
	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
	blob_data_ro->data_ro_flags = (1ULL << 62);
	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
	blob_md_ro->md_ro_flags = (1ULL << 61);
	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;

	g_bserrno = -1;
	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bserrno = -1;
	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bserrno = -1;
	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bserrno = -1;
	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_invalid = NULL;
	g_bserrno = -1;
	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_data_ro = NULL;
	g_bserrno = -1;
	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_md_ro = NULL;

	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	ut_bs_reload(&bs, NULL);

	/* A blob carrying an unknown invalid flag must fail to open. */
	g_blob = NULL;
	g_bserrno = 0;
	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	g_blob = NULL;
	g_bserrno = -1;
	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob_data_ro = g_blob;
	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
	CU_ASSERT(blob_data_ro->data_ro == true);
	CU_ASSERT(blob_data_ro->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);

	/* An unknown md_ro flag only makes the metadata read-only; the data
	 * remains writable. */
	g_blob = NULL;
	g_bserrno = -1;
	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob_md_ro = g_blob;
	CU_ASSERT(blob_md_ro->data_ro == false);
	CU_ASSERT(blob_md_ro->md_ro == true);

	/* Syncing md of an md_ro blob must still succeed. */
	g_bserrno = -1;
	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_blob_close_and_delete(bs, blob_data_ro);
	ut_blob_close_and_delete(bs, blob_md_ro);
}
4085 
/* Verify that an older on-disk version (v2, which predates the used_blobid
 * mask) is preserved across load/unload cycles and that blob IDs are
 * correctly reconstructed from the metadata pages when no mask is present. */
static void
bs_version(void)
{
	struct spdk_bs_super_block *super;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/*
	 * Change the bs version on disk.  This will allow us to
	 *  test that the version does not get modified automatically
	 *  when loading and unloading the blobstore.
	 */
	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	CU_ASSERT(super->version == SPDK_BS_VERSION);
	CU_ASSERT(super->clean == 1);
	super->version = 2;
	/*
	 * Version 2 metadata does not have a used blobid mask, so clear
	 *  those fields in the super block and zero the corresponding
	 *  region on "disk".  We will use this to ensure blob IDs are
	 *  correctly reconstructed.
	 */
	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
	super->used_blobid_mask_start = 0;
	super->used_blobid_mask_len = 0;
	/* Keep the super block valid after the edits above. */
	super->crc = blob_md_page_calc_crc(super);

	/* Load an existing blob store */
	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	CU_ASSERT(super->clean == 1);
	bs = g_bs;

	/*
	 * Create a blob - just to make sure that when we unload it
	 *  results in writing the super block (since metadata pages
	 *  were allocated).
	 */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Unload the blob store; version and (absent) blobid mask must be
	 * written back unchanged. */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	CU_ASSERT(super->version == 2);
	CU_ASSERT(super->used_blobid_mask_start == 0);
	CU_ASSERT(super->used_blobid_mask_len == 0);

	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* The blob id must be recoverable even without a used_blobid mask. */
	g_blob = NULL;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	ut_blob_close_and_delete(bs, blob);

	CU_ASSERT(super->version == 2);
	CU_ASSERT(super->used_blobid_mask_start == 0);
	CU_ASSERT(super->used_blobid_mask_len == 0);
}
4173 
4174 static void
4175 blob_set_xattrs_test(void)
4176 {
4177 	struct spdk_blob_store *bs = g_bs;
4178 	struct spdk_blob *blob;
4179 	struct spdk_blob_opts opts;
4180 	const void *value;
4181 	size_t value_len;
4182 	char *xattr;
4183 	size_t xattr_length;
4184 	int rc;
4185 
4186 	/* Create blob with extra attributes */
4187 	ut_spdk_blob_opts_init(&opts);
4188 
4189 	opts.xattrs.names = g_xattr_names;
4190 	opts.xattrs.get_value = _get_xattr_value;
4191 	opts.xattrs.count = 3;
4192 	opts.xattrs.ctx = &g_ctx;
4193 
4194 	blob = ut_blob_create_and_open(bs, &opts);
4195 
4196 	/* Get the xattrs */
4197 	value = NULL;
4198 
4199 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
4200 	CU_ASSERT(rc == 0);
4201 	SPDK_CU_ASSERT_FATAL(value != NULL);
4202 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
4203 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
4204 
4205 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
4206 	CU_ASSERT(rc == 0);
4207 	SPDK_CU_ASSERT_FATAL(value != NULL);
4208 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
4209 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
4210 
4211 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
4212 	CU_ASSERT(rc == 0);
4213 	SPDK_CU_ASSERT_FATAL(value != NULL);
4214 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
4215 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
4216 
4217 	/* Try to get non existing attribute */
4218 
4219 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
4220 	CU_ASSERT(rc == -ENOENT);
4221 
4222 	/* Try xattr exceeding maximum length of descriptor in single page */
4223 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
4224 		       strlen("large_xattr") + 1;
4225 	xattr = calloc(xattr_length, sizeof(char));
4226 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
4227 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
4228 	free(xattr);
4229 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
4230 
4231 	spdk_blob_close(blob, blob_op_complete, NULL);
4232 	poll_threads();
4233 	CU_ASSERT(g_bserrno == 0);
4234 	blob = NULL;
4235 	g_blob = NULL;
4236 	g_blobid = SPDK_BLOBID_INVALID;
4237 
4238 	/* NULL callback */
4239 	ut_spdk_blob_opts_init(&opts);
4240 	opts.xattrs.names = g_xattr_names;
4241 	opts.xattrs.get_value = NULL;
4242 	opts.xattrs.count = 1;
4243 	opts.xattrs.ctx = &g_ctx;
4244 
4245 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
4246 	poll_threads();
4247 	CU_ASSERT(g_bserrno == -EINVAL);
4248 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4249 
4250 	/* NULL values */
4251 	ut_spdk_blob_opts_init(&opts);
4252 	opts.xattrs.names = g_xattr_names;
4253 	opts.xattrs.get_value = _get_xattr_value_null;
4254 	opts.xattrs.count = 1;
4255 	opts.xattrs.ctx = NULL;
4256 
4257 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
4258 	poll_threads();
4259 	CU_ASSERT(g_bserrno == -EINVAL);
4260 }
4261 
4262 static void
4263 blob_thin_prov_alloc(void)
4264 {
4265 	struct spdk_blob_store *bs = g_bs;
4266 	struct spdk_blob *blob;
4267 	struct spdk_blob_opts opts;
4268 	spdk_blob_id blobid;
4269 	uint64_t free_clusters;
4270 
4271 	free_clusters = spdk_bs_free_cluster_count(bs);
4272 
4273 	/* Set blob as thin provisioned */
4274 	ut_spdk_blob_opts_init(&opts);
4275 	opts.thin_provision = true;
4276 
4277 	blob = ut_blob_create_and_open(bs, &opts);
4278 	blobid = spdk_blob_get_id(blob);
4279 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4280 
4281 	CU_ASSERT(blob->active.num_clusters == 0);
4282 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
4283 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4284 
4285 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4286 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4287 	poll_threads();
4288 	CU_ASSERT(g_bserrno == 0);
4289 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4290 	CU_ASSERT(blob->active.num_clusters == 5);
4291 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4292 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4293 
4294 	/* Grow it to 1TB - still unallocated */
4295 	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
4296 	poll_threads();
4297 	CU_ASSERT(g_bserrno == 0);
4298 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4299 	CU_ASSERT(blob->active.num_clusters == 262144);
4300 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
4301 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4302 
4303 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4304 	poll_threads();
4305 	CU_ASSERT(g_bserrno == 0);
4306 	/* Sync must not change anything */
4307 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4308 	CU_ASSERT(blob->active.num_clusters == 262144);
4309 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
4310 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4311 	/* Since clusters are not allocated,
4312 	 * number of metadata pages is expected to be minimal.
4313 	 */
4314 	CU_ASSERT(blob->active.num_pages == 1);
4315 
4316 	/* Shrink the blob to 3 clusters - still unallocated */
4317 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
4318 	poll_threads();
4319 	CU_ASSERT(g_bserrno == 0);
4320 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4321 	CU_ASSERT(blob->active.num_clusters == 3);
4322 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
4323 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4324 
4325 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4326 	poll_threads();
4327 	CU_ASSERT(g_bserrno == 0);
4328 	/* Sync must not change anything */
4329 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4330 	CU_ASSERT(blob->active.num_clusters == 3);
4331 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
4332 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4333 
4334 	spdk_blob_close(blob, blob_op_complete, NULL);
4335 	poll_threads();
4336 	CU_ASSERT(g_bserrno == 0);
4337 
4338 	ut_bs_reload(&bs, NULL);
4339 
4340 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4341 	poll_threads();
4342 	CU_ASSERT(g_bserrno == 0);
4343 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4344 	blob = g_blob;
4345 
4346 	/* Check that clusters allocation and size is still the same */
4347 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4348 	CU_ASSERT(blob->active.num_clusters == 3);
4349 
4350 	ut_blob_close_and_delete(bs, blob);
4351 }
4352 
4353 static void
4354 blob_insert_cluster_msg_test(void)
4355 {
4356 	struct spdk_blob_store *bs = g_bs;
4357 	struct spdk_blob *blob;
4358 	struct spdk_blob_opts opts;
4359 	struct spdk_blob_md_page page = {};
4360 	spdk_blob_id blobid;
4361 	uint64_t free_clusters;
4362 	uint64_t new_cluster = 0;
4363 	uint32_t cluster_num = 3;
4364 	uint32_t extent_page = 0;
4365 
4366 	free_clusters = spdk_bs_free_cluster_count(bs);
4367 
4368 	/* Set blob as thin provisioned */
4369 	ut_spdk_blob_opts_init(&opts);
4370 	opts.thin_provision = true;
4371 	opts.num_clusters = 4;
4372 
4373 	blob = ut_blob_create_and_open(bs, &opts);
4374 	blobid = spdk_blob_get_id(blob);
4375 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4376 
4377 	CU_ASSERT(blob->active.num_clusters == 4);
4378 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
4379 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4380 
4381 	/* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread.
4382 	 * This is to simulate behaviour when cluster is allocated after blob creation.
4383 	 * Such as _spdk_bs_allocate_and_copy_cluster(). */
4384 	spdk_spin_lock(&bs->used_lock);
4385 	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
4386 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4387 	spdk_spin_unlock(&bs->used_lock);
4388 
4389 	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, &page,
4390 					 blob_op_complete, NULL);
4391 	poll_threads();
4392 
4393 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4394 
4395 	spdk_blob_close(blob, blob_op_complete, NULL);
4396 	poll_threads();
4397 	CU_ASSERT(g_bserrno == 0);
4398 
4399 	ut_bs_reload(&bs, NULL);
4400 
4401 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4402 	poll_threads();
4403 	CU_ASSERT(g_bserrno == 0);
4404 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4405 	blob = g_blob;
4406 
4407 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4408 
4409 	ut_blob_close_and_delete(bs, blob);
4410 }
4411 
/* Thin-provisioned read/write: unallocated clusters read back as zeroes,
 * concurrent first-writes from two threads race on cluster allocation, and
 * the number of bytes written to the device matches expectations. */
static void
blob_thin_prov_rw(void)
{
	static const uint8_t zero[10 * 4096] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *blob_id0;
	struct spdk_io_channel *channel, *channel_thread1;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint64_t page_size;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	uint64_t write_bytes;
	uint64_t read_bytes;

	free_clusters = spdk_bs_free_cluster_count(bs);
	page_size = spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	/* Create and delete blob at md page 0, so that next md page allocation
	 * for extent will use that. */
	blob_id0 = ut_blob_create_and_open(bs, &opts);
	blob = ut_blob_create_and_open(bs, &opts);
	ut_blob_close_and_delete(bs, blob_id0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);

	/* Snapshot device byte counters to measure the cost of the writes below. */
	write_bytes = g_dev_write_bytes;
	read_bytes = g_dev_read_bytes;

	/* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */
	set_thread(1);
	channel_thread1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel_thread1 != NULL);
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	/* Perform write on thread 0. That will try to allocate cluster,
	 * but fail due to another thread issuing the cluster allocation first. */
	set_thread(0);
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	/* Both threads have provisionally claimed a cluster at this point... */
	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* ...but once the race resolves, the losing allocation is released and
	 * exactly one cluster remains allocated. */
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
	/* For thin-provisioned blob we need to write 20 pages plus one page metadata and
	 * read 0 bytes */
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
	} else {
		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
	}
	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);

	/* Read back and verify the data both threads wrote (same pattern). */
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	ut_blob_close_and_delete(bs, blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Free each channel on the thread that allocated it. */
	set_thread(1);
	spdk_bs_free_io_channel(channel_thread1);
	set_thread(0);
	spdk_bs_free_io_channel(channel);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
4515 
/* Verify exactly how many device writes a first write to an unallocated
 * cluster costs for a thin-provisioned blob, with and without the extent
 * table, across several extent pages and including unmap + re-write. */
static void
blob_thin_prov_write_count_io(void)
{
	struct spdk_blob_store *bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *ch;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint64_t page_size;
	uint8_t payload_write[4096];
	uint64_t write_bytes;
	uint64_t read_bytes;
	const uint32_t CLUSTER_SZ = 16384;
	uint32_t pages_per_cluster;
	uint32_t pages_per_extent_page;
	uint32_t i;

	/* Use a very small cluster size for this test.  This ensures we need multiple
	 * extent pages to hold all of the clusters even for relatively small blobs like
	 * we are restricted to for the unit tests (i.e. we don't want to allocate multi-GB
	 * buffers).
	 */
	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.cluster_sz = CLUSTER_SZ;

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	free_clusters = spdk_bs_free_cluster_count(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = CLUSTER_SZ / page_size;
	pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster;

	ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Resize the blob so that it will require 8 extent pages to hold all of
	 * the clusters.
	 */
	g_bserrno = -1;
	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bserrno = -1;
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);

	/* Each iteration targets the first cluster of a different extent page;
	 * each iteration allocates 2 clusters net, hence the (2 * i + k) math
	 * in the free-cluster assertions below. */
	memset(payload_write, 0, sizeof(payload_write));
	for (i = 0; i < 8; i++) {
		write_bytes = g_dev_write_bytes;
		read_bytes = g_dev_read_bytes;

		/* First write into an unallocated cluster of extent page i. */
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));

		CU_ASSERT(g_dev_read_bytes == read_bytes);
		if (!g_use_extent_table) {
			/* For legacy metadata, we should have written two pages - one for the
			 * write I/O itself, another for the blob's primary metadata.
			 */
			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
		} else {
			/* For extent table metadata, we should have written three pages - one
			 * for the write I/O, one for the extent page, one for the blob's primary
			 * metadata.
			 */
			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3);
		}

		/* The write should have synced the metadata already.  Do another sync here
		 * just to confirm.
		 */
		write_bytes = g_dev_write_bytes;
		read_bytes = g_dev_read_bytes;

		g_bserrno = -1;
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2 * i + 1);

		/* A redundant sync must cause no device I/O at all. */
		CU_ASSERT(g_dev_read_bytes == read_bytes);
		CU_ASSERT(g_dev_write_bytes == write_bytes);

		/* Now write to another unallocated cluster that is part of the same extent page. */
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster,
				   1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2 * i + 2);

		CU_ASSERT(g_dev_read_bytes == read_bytes);
		/*
		 * For legacy metadata, we should have written the I/O and the primary metadata page.
		 * For extent table metadata, we should have written the I/O and the extent metadata page.
		 */
		CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);

		/* Send unmap aligned to the whole cluster - should free it up */
		g_bserrno = -1;
		spdk_blob_io_unmap(blob, ch, pages_per_extent_page * i, pages_per_cluster, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));

		/* Write back to the freed cluster */
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
	}

	ut_blob_close_and_delete(bs, blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	spdk_bs_free_io_channel(ch);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
4664 
/*
 * Unmap handling on a thin-provisioned blob using a very large cluster size
 * (device split into CLUSTER_COUNT + 1 clusters).  Verifies that:
 * whole-cluster unmaps release clusters back to the blobstore; unaligned
 * unmaps that fully cover a cluster release it as well; a write issued
 * concurrently with an unmap does not claim the cluster being unmapped;
 * clusters are not freed before the device-level unmap completes; three
 * concurrent unmaps of the same cluster do not trip asserts (issue #3358
 * regression); and unmapping a cluster of a clone backed by a snapshot
 * zeroes the data without releasing the cluster.
 */
static void
blob_thin_prov_unmap_cluster(void)
{
	struct spdk_blob_store *bs;
	struct spdk_blob *blob, *snapshot;
	struct spdk_io_channel *ch;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint64_t page_size;
	uint8_t payload_write[4096];
	uint8_t payload_read[4096];
	const uint32_t CLUSTER_COUNT = 3;
	uint32_t pages_per_cluster;
	spdk_blob_id blobid, snapshotid;
	uint32_t i;
	int err;

	/* Use a very large cluster size for this test. Check how the unmap/release cluster code path behaves when
	 * clusters are fully used.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.cluster_sz = dev->blocklen * dev->blockcnt / (CLUSTER_COUNT + 1);

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	free_clusters = spdk_bs_free_cluster_count(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = bs_opts.cluster_sz / page_size;

	ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(free_clusters == CLUSTER_COUNT);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	blobid = spdk_blob_get_id(blob);

	g_bserrno = -1;
	spdk_blob_resize(blob, CLUSTER_COUNT, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Thin provisioning: resize + md sync must not allocate any cluster yet */
	g_bserrno = -1;
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == CLUSTER_COUNT);

	/* Fill all clusters */
	for (i = 0; i < CLUSTER_COUNT; i++) {
		memset(payload_write, i + 1, sizeof(payload_write));
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, pages_per_cluster * i, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (i + 1) == spdk_bs_free_cluster_count(bs));
	}
	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));

	/* Unmap one whole cluster */
	g_bserrno = -1;
	spdk_blob_io_unmap(blob, ch, pages_per_cluster, pages_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Verify the data read from the cluster is zeroed out */
	memset(payload_write, 0, sizeof(payload_write));
	spdk_blob_io_read(blob, ch, payload_read, pages_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4096) == 0);

	/* Fill the same cluster with data */
	memset(payload_write, 3, sizeof(payload_write));
	g_bserrno = -1;
	spdk_blob_io_write(blob, ch, payload_write, pages_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));

	/* Verify the data read from the cluster has the expected data */
	spdk_blob_io_read(blob, ch, payload_read, pages_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4096) == 0);

	/* Send an unaligned unmap that encompasses one whole cluster */
	g_bserrno = -1;
	spdk_blob_io_unmap(blob, ch, pages_per_cluster - 1, pages_per_cluster + 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Verify the data read from the cluster is zeroed out */
	g_bserrno = -1;
	memset(payload_write, 0, sizeof(payload_write));
	spdk_blob_io_read(blob, ch, payload_read, pages_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4096) == 0);

	/* Send a simultaneous unmap with a write to an unallocated area -
	 * check that writes don't claim the currently unmapped cluster */
	g_bserrno = -1;
	memset(payload_write, 7, sizeof(payload_write));
	spdk_blob_io_unmap(blob, ch, 0, pages_per_cluster, blob_op_complete, NULL);
	spdk_blob_io_write(blob, ch, payload_write, pages_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Verify the contents of written sector */
	g_bserrno = -1;
	spdk_blob_io_read(blob, ch, payload_read, pages_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4096) == 0);

	/* Verify the contents of unmapped sector */
	g_bserrno = -1;
	memset(payload_write, 0, sizeof(payload_write));
	spdk_blob_io_read(blob, ch, payload_read, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4096) == 0);

	/* Make sure clusters are not freed until the unmap to the drive is done */
	g_bserrno = -1;
	memset(payload_write, 7, sizeof(payload_write));
	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));

	g_bserrno = -1;
	spdk_blob_io_unmap(blob, ch, 0, pages_per_cluster, blob_op_complete, NULL);
	/* Step single events until the written pattern no longer appears at that
	 * device offset (presumably the device cluster backing blob cluster 0 —
	 * verify against the bs layout); until then no cluster may be freed. */
	while (memcmp(payload_write, &g_dev_buffer[4096 * pages_per_cluster], 4096) == 0) {
		CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
		poll_thread_times(0, 1);
	}
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Issue #3358 had a bug with concurrent trims to the same cluster causing an assert, check for regressions.
	 * Send three concurrent unmaps to the same cluster.
	 */
	g_bserrno = -1;
	memset(payload_write, 7, sizeof(payload_write));
	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));

	g_bserrno = -1;
	err = -1;
	spdk_blob_io_unmap(blob, ch, 0, pages_per_cluster, blob_op_complete, NULL);
	spdk_blob_io_unmap(blob, ch, 0, pages_per_cluster, blob_op_complete, NULL);
	/* NOTE(review): blob_op_complete is defined earlier in this file; the
	 * CU_ASSERT(err == 0) below only passes if that callback stores bserrno
	 * through cb_arg — confirm, otherwise err stays -1. */
	spdk_blob_io_unmap(blob, ch, 0, pages_per_cluster, blob_op_complete, &err);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(err == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Test thin-provisioned blob that is backed */
	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	g_bserrno = -1;
	memset(payload_write, 1, sizeof(payload_write));
	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));

	/* Create a snapshot */
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	snapshotid = g_blobid;
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Write data to blob, it will alloc new cluster */
	g_bserrno = -1;
	memset(payload_write, 2, sizeof(payload_write));
	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));

	/* Unmap one whole cluster, but do not release this cluster */
	g_bserrno = -1;
	spdk_blob_io_unmap(blob, ch, 0, pages_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));

	/* Verify the data read from the cluster is zeroed out */
	g_bserrno = -1;
	memset(payload_write, 0, sizeof(payload_write));
	spdk_blob_io_read(blob, ch, payload_read, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4096) == 0);

	ut_blob_close_and_delete(bs, blob);
	ut_blob_close_and_delete(bs, snapshot);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	spdk_bs_free_io_channel(ch);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
4909 
/*
 * Thin-provisioned blob, out-of-order cluster allocation: target the blob's
 * second cluster as the first allocation via a 10-page write, check the
 * exact number of device bytes written (10 payload pages + 1 metadata page,
 * plus one extra page for the EXTENT_PAGE when the extent table is enabled)
 * with zero device reads, then reload the blobstore and confirm the data
 * persisted.
 */
static void
blob_thin_prov_rle(void)
{
	static const uint8_t zero[10 * 4096] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint64_t free_clusters;
	uint64_t page_size;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	uint64_t write_bytes;
	uint64_t read_bytes;
	uint64_t io_unit;

	free_clusters = spdk_bs_free_cluster_count(bs);
	page_size = spdk_bs_get_page_size(bs);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	/* Thin provisioning: creation must not consume any cluster */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Target specifically second cluster in a blob as first allocation */
	io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);

	/* Snapshot the device byte counters to verify exact I/O cost below */
	write_bytes = g_dev_write_bytes;
	read_bytes = g_dev_read_bytes;

	/* Issue write to second cluster in a blob */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	/* For thin-provisioned blob we need to write 10 pages plus one page metadata and
	 * read 0 bytes */
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
	} else {
		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
	}
	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);

	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload the blobstore to prove the allocation was persisted */
	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Read second cluster after blob reload to confirm data written */
	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
5004 
/*
 * Vectored I/O on a thin-provisioned blob: resize to 5 clusters (still
 * unallocated), confirm a readv of unallocated clusters returns zeroes,
 * then write 10 pages at page offset 250 through a 3-element iovec and
 * read them back through a differently-split 3-element iovec.
 */
static void
blob_thin_prov_rw_iov(void)
{
	static const uint8_t zero[10 * 4096] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	struct iovec iov_read[3];
	struct iovec iov_write[3];

	free_clusters = spdk_bs_free_cluster_count(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);

	/* Write 10 pages through a 1/5/4-page iovec split */
	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;

	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read back through a 3/4/3-page split and compare against the write */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
5088 
/* Context shared between bs_load_iter_test() and the test_iter() callback. */
struct iter_ctx {
	int		current_iter;	/* index of the next expected entry in blobid[] */
	spdk_blob_id	blobid[4];	/* blob IDs recorded in creation order */
};
5093 
5094 static void
5095 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
5096 {
5097 	struct iter_ctx *iter_ctx = arg;
5098 	spdk_blob_id blobid;
5099 
5100 	CU_ASSERT(bserrno == 0);
5101 	blobid = spdk_blob_get_id(blob);
5102 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
5103 }
5104 
/*
 * Verify the iter_cb_fn/iter_cb_arg load options: create four blobs, then
 * reload the blobstore after a clean shutdown and again after a dirty
 * shutdown (bs_free without unload), checking via test_iter() that the
 * iteration callback visits the blobs in creation order both times.
 */
static void
bs_load_iter_test(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct iter_ctx iter_ctx = { 0 };
	struct spdk_blob *blob;
	int i, rc;
	struct spdk_bs_opts opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	for (i = 0; i < 4; i++) {
		blob = ut_blob_create_and_open(bs, NULL);
		iter_ctx.blobid[i] = spdk_blob_get_id(blob);

		/* Just save the blobid as an xattr for testing purposes. */
		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
		CU_ASSERT(rc == 0);

		/* Resize the blob */
		spdk_blob_resize(blob, i, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	g_bserrno = -1;
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	opts.iter_cb_fn = test_iter;
	opts.iter_cb_arg = &iter_ctx;

	/* Test blob iteration during load after a clean shutdown. */
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Dirty shutdown */
	bs_free(bs);

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	opts.iter_cb_fn = test_iter;
	/* Rewind the iteration cursor for the second pass */
	iter_ctx.current_iter = 0;
	opts.iter_cb_arg = &iter_ctx;

	/* Test blob iteration during load after a dirty shutdown. */
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
5184 
/*
 * Read/write against a thin-provisioned blob and its snapshot: after the
 * snapshot is taken, a write to the clone must trigger a copy-on-write of
 * exactly one cluster (accounted as device copy bytes when the dev supports
 * copy, otherwise as read + write bytes) plus metadata page writes, and the
 * snapshot's data must remain unchanged after the clone is written.
 */
static void
blob_snapshot_rw(void)
{
	static const uint8_t zero[10 * 4096] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *snapshot;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid, snapshotid;
	uint64_t free_clusters;
	uint64_t cluster_size;
	uint64_t page_size;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	uint64_t write_bytes_start;
	uint64_t read_bytes_start;
	uint64_t copy_bytes_start;
	uint64_t write_bytes;
	uint64_t read_bytes;
	uint64_t copy_bytes;

	free_clusters = spdk_bs_free_cluster_count(bs);
	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Unallocated clusters must read back as zeroes */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);

	/* First write allocates cluster(s) on the blob itself */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* Create snapshot from blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);

	/* Snapshot the device byte counters before the copy-on-write write */
	write_bytes_start = g_dev_write_bytes;
	read_bytes_start = g_dev_read_bytes;
	copy_bytes_start = g_dev_copy_bytes;

	memset(payload_write, 0xAA, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* For a clone we need to allocate and copy one cluster, update one page of metadata
	 * and then write 10 pages of payload.
	 */
	write_bytes = g_dev_write_bytes - write_bytes_start;
	read_bytes = g_dev_read_bytes - read_bytes_start;
	copy_bytes = g_dev_copy_bytes - copy_bytes_start;
	if (g_dev_copy_enabled) {
		CU_ASSERT(copy_bytes == cluster_size);
	} else {
		CU_ASSERT(copy_bytes == 0);
	}
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		CU_ASSERT(write_bytes + copy_bytes == page_size * 12 + cluster_size);
	} else {
		CU_ASSERT(write_bytes + copy_bytes == page_size * 11 + cluster_size);
	}
	/* The backing cluster is read exactly once (or copied instead of read) */
	CU_ASSERT(read_bytes + copy_bytes == cluster_size);

	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	/* Data on snapshot should not change after write to clone */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	ut_blob_close_and_delete(bs, blob);
	ut_blob_close_and_delete(bs, snapshot);

	spdk_bs_free_io_channel(channel);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
5301 
/*
 * Vectored read/write on a thin-provisioned blob that has a snapshot:
 * a readv of unallocated clusters returns zeroes, and data written through
 * a 1/5/4-page iovec reads back intact through a 3/4/3-page iovec.
 */
static void
blob_snapshot_rw_iov(void)
{
	static const uint8_t zero[10 * 4096] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *snapshot;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid, snapshotid;
	uint64_t free_clusters;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	struct iovec iov_read[3];
	struct iovec iov_write[3];

	free_clusters = spdk_bs_free_cluster_count(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Create snapshot from blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);

	/* Write 10 pages at page offset 250 through a 1/5/4-page split */
	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;

	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read back through a differently-split iovec and compare */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
	ut_blob_close_and_delete(bs, snapshot);
}
5391 
5392 /**
5393  * Inflate / decouple parent rw unit tests.
5394  *
5395  * --------------
5396  * original blob:         0         1         2         3         4
5397  *                   ,---------+---------+---------+---------+---------.
5398  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
5399  *                   +---------+---------+---------+---------+---------+
5400  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
5401  *                   +---------+---------+---------+---------+---------+
5402  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
5403  *                   '---------+---------+---------+---------+---------'
5404  *                   .         .         .         .         .         .
5405  * --------          .         .         .         .         .         .
5406  * inflate:          .         .         .         .         .         .
5407  *                   ,---------+---------+---------+---------+---------.
5408  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
5409  *                   '---------+---------+---------+---------+---------'
5410  *
5411  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
5412  *               on snapshot2 and snapshot removed .         .         .
5413  *                   .         .         .         .         .         .
5414  * ----------------  .         .         .         .         .         .
5415  * decouple parent:  .         .         .         .         .         .
5416  *                   ,---------+---------+---------+---------+---------.
5417  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
5418  *                   +---------+---------+---------+---------+---------+
5419  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
5420  *                   '---------+---------+---------+---------+---------'
5421  *
5422  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
5423  *               on snapshot2 removed and on snapshot still exists. Snapshot2
5424  *               should remain a clone of snapshot.
5425  */
5426 static void
5427 _blob_inflate_rw(bool decouple_parent)
5428 {
5429 	struct spdk_blob_store *bs = g_bs;
5430 	struct spdk_blob *blob, *snapshot, *snapshot2;
5431 	struct spdk_io_channel *channel;
5432 	struct spdk_blob_opts opts;
5433 	spdk_blob_id blobid, snapshotid, snapshot2id;
5434 	uint64_t free_clusters;
5435 	uint64_t cluster_size;
5436 
5437 	uint64_t payload_size;
5438 	uint8_t *payload_read;
5439 	uint8_t *payload_write;
5440 	uint8_t *payload_clone;
5441 
5442 	uint64_t pages_per_cluster;
5443 	uint64_t pages_per_payload;
5444 
5445 	int i;
5446 	spdk_blob_id ids[2];
5447 	size_t count;
5448 
5449 	free_clusters = spdk_bs_free_cluster_count(bs);
5450 	cluster_size = spdk_bs_get_cluster_size(bs);
5451 	pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
5452 	pages_per_payload = pages_per_cluster * 5;
5453 
5454 	payload_size = cluster_size * 5;
5455 
5456 	payload_read = malloc(payload_size);
5457 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
5458 
5459 	payload_write = malloc(payload_size);
5460 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
5461 
5462 	payload_clone = malloc(payload_size);
5463 	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
5464 
5465 	channel = spdk_bs_alloc_io_channel(bs);
5466 	SPDK_CU_ASSERT_FATAL(channel != NULL);
5467 
5468 	/* Create blob */
5469 	ut_spdk_blob_opts_init(&opts);
5470 	opts.thin_provision = true;
5471 	opts.num_clusters = 5;
5472 
5473 	blob = ut_blob_create_and_open(bs, &opts);
5474 	blobid = spdk_blob_get_id(blob);
5475 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5476 
5477 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
5478 
5479 	/* 1) Initial read should return zeroed payload */
5480 	memset(payload_read, 0xFF, payload_size);
5481 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
5482 			  blob_op_complete, NULL);
5483 	poll_threads();
5484 	CU_ASSERT(g_bserrno == 0);
5485 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
5486 
5487 	/* Fill whole blob with a pattern, except last cluster (to be sure it
5488 	 * isn't allocated) */
5489 	memset(payload_write, 0xE5, payload_size - cluster_size);
5490 	spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
5491 			   pages_per_cluster, blob_op_complete, NULL);
5492 	poll_threads();
5493 	CU_ASSERT(g_bserrno == 0);
5494 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
5495 
5496 	/* 2) Create snapshot from blob (first level) */
5497 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5498 	poll_threads();
5499 	CU_ASSERT(g_bserrno == 0);
5500 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5501 	snapshotid = g_blobid;
5502 
5503 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5504 	poll_threads();
5505 	CU_ASSERT(g_bserrno == 0);
5506 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5507 	snapshot = g_blob;
5508 	CU_ASSERT(snapshot->data_ro == true);
5509 	CU_ASSERT(snapshot->md_ro == true);
5510 
5511 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
5512 
5513 	/* Write every second cluster with a pattern.
5514 	 *
5515 	 * Last cluster shouldn't be written, to be sure that snapshot nor clone
5516 	 * doesn't allocate it.
5517 	 *
5518 	 * payload_clone stores expected result on "blob" read at the time and
5519 	 * is used only to check data consistency on clone before and after
5520 	 * inflation. Initially we fill it with a backing snapshots pattern
5521 	 * used before.
5522 	 */
5523 	memset(payload_clone, 0xE5, payload_size - cluster_size);
5524 	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
5525 	memset(payload_write, 0xAA, payload_size);
5526 	for (i = 1; i < 5; i += 2) {
5527 		spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
5528 				   pages_per_cluster, blob_op_complete, NULL);
5529 		poll_threads();
5530 		CU_ASSERT(g_bserrno == 0);
5531 
5532 		/* Update expected result */
5533 		memcpy(payload_clone + (cluster_size * i), payload_write,
5534 		       cluster_size);
5535 	}
5536 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
5537 
5538 	/* Check data consistency on clone */
5539 	memset(payload_read, 0xFF, payload_size);
5540 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
5541 			  blob_op_complete, NULL);
5542 	poll_threads();
5543 	CU_ASSERT(g_bserrno == 0);
5544 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
5545 
5546 	/* 3) Create second levels snapshot from blob */
5547 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5548 	poll_threads();
5549 	CU_ASSERT(g_bserrno == 0);
5550 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5551 	snapshot2id = g_blobid;
5552 
5553 	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
5554 	poll_threads();
5555 	CU_ASSERT(g_bserrno == 0);
5556 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5557 	snapshot2 = g_blob;
5558 	CU_ASSERT(snapshot2->data_ro == true);
5559 	CU_ASSERT(snapshot2->md_ro == true);
5560 
5561 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);
5562 
5563 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5564 
5565 	/* Write one cluster on the top level blob. This cluster (1) covers
5566 	 * already allocated cluster in the snapshot2, so shouldn't be inflated
5567 	 * at all */
5568 	spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
5569 			   pages_per_cluster, blob_op_complete, NULL);
5570 	poll_threads();
5571 	CU_ASSERT(g_bserrno == 0);
5572 
5573 	/* Update expected result */
5574 	memcpy(payload_clone + cluster_size, payload_write, cluster_size);
5575 
5576 	/* Check data consistency on clone */
5577 	memset(payload_read, 0xFF, payload_size);
5578 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
5579 			  blob_op_complete, NULL);
5580 	poll_threads();
5581 	CU_ASSERT(g_bserrno == 0);
5582 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
5583 
5584 
5585 	/* Close all blobs */
5586 	spdk_blob_close(blob, blob_op_complete, NULL);
5587 	poll_threads();
5588 	CU_ASSERT(g_bserrno == 0);
5589 
5590 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5591 	poll_threads();
5592 	CU_ASSERT(g_bserrno == 0);
5593 
5594 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5595 	poll_threads();
5596 	CU_ASSERT(g_bserrno == 0);
5597 
5598 	/* Check snapshot-clone relations */
5599 	count = 2;
5600 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
5601 	CU_ASSERT(count == 1);
5602 	CU_ASSERT(ids[0] == snapshot2id);
5603 
5604 	count = 2;
5605 	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5606 	CU_ASSERT(count == 1);
5607 	CU_ASSERT(ids[0] == blobid);
5608 
5609 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
5610 
5611 	free_clusters = spdk_bs_free_cluster_count(bs);
5612 	if (!decouple_parent) {
5613 		/* Do full blob inflation */
5614 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
5615 		poll_threads();
5616 		CU_ASSERT(g_bserrno == 0);
5617 
5618 		/* All clusters should be inflated (except one already allocated
5619 		 * in a top level blob) */
5620 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
5621 
5622 		/* Check if relation tree updated correctly */
5623 		count = 2;
5624 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
5625 
5626 		/* snapshotid have one clone */
5627 		CU_ASSERT(count == 1);
5628 		CU_ASSERT(ids[0] == snapshot2id);
5629 
5630 		/* snapshot2id have no clones */
5631 		count = 2;
5632 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5633 		CU_ASSERT(count == 0);
5634 
5635 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5636 	} else {
5637 		/* Decouple parent of blob */
5638 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5639 		poll_threads();
5640 		CU_ASSERT(g_bserrno == 0);
5641 
5642 		/* Only one cluster from a parent should be inflated (second one
5643 		 * is covered by a cluster written on a top level blob, and
5644 		 * already allocated) */
5645 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
5646 
5647 		/* Check if relation tree updated correctly */
5648 		count = 2;
5649 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
5650 
5651 		/* snapshotid have two clones now */
5652 		CU_ASSERT(count == 2);
5653 		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5654 		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
5655 
5656 		/* snapshot2id have no clones */
5657 		count = 2;
5658 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5659 		CU_ASSERT(count == 0);
5660 
5661 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5662 	}
5663 
5664 	/* Try to delete snapshot2 (should pass) */
5665 	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
5666 	poll_threads();
5667 	CU_ASSERT(g_bserrno == 0);
5668 
5669 	/* Try to delete base snapshot */
5670 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5671 	poll_threads();
5672 	CU_ASSERT(g_bserrno == 0);
5673 
5674 	/* Reopen blob after snapshot deletion */
5675 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5676 	poll_threads();
5677 	CU_ASSERT(g_bserrno == 0);
5678 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5679 	blob = g_blob;
5680 
5681 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
5682 
5683 	/* Check data consistency on inflated blob */
5684 	memset(payload_read, 0xFF, payload_size);
5685 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
5686 			  blob_op_complete, NULL);
5687 	poll_threads();
5688 	CU_ASSERT(g_bserrno == 0);
5689 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
5690 
5691 	spdk_bs_free_io_channel(channel);
5692 	poll_threads();
5693 
5694 	free(payload_read);
5695 	free(payload_write);
5696 	free(payload_clone);
5697 
5698 	ut_blob_close_and_delete(bs, blob);
5699 }
5700 
5701 static void
5702 blob_inflate_rw(void)
5703 {
5704 	_blob_inflate_rw(false);
5705 	_blob_inflate_rw(true);
5706 }
5707 
5708 /**
5709  * Snapshot-clones relation test
5710  *
5711  *         snapshot
5712  *            |
5713  *      +-----+-----+
5714  *      |           |
5715  *   blob(ro)   snapshot2
5716  *      |           |
5717  *   clone2      clone
5718  */
5719 static void
5720 blob_relations(void)
5721 {
5722 	struct spdk_blob_store *bs;
5723 	struct spdk_bs_dev *dev;
5724 	struct spdk_bs_opts bs_opts;
5725 	struct spdk_blob_opts opts;
5726 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
5727 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
5728 	int rc;
5729 	size_t count;
5730 	spdk_blob_id ids[10] = {};
5731 
5732 	dev = init_dev();
5733 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5734 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5735 
5736 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5737 	poll_threads();
5738 	CU_ASSERT(g_bserrno == 0);
5739 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5740 	bs = g_bs;
5741 
5742 	/* 1. Create blob with 10 clusters */
5743 
5744 	ut_spdk_blob_opts_init(&opts);
5745 	opts.num_clusters = 10;
5746 
5747 	blob = ut_blob_create_and_open(bs, &opts);
5748 	blobid = spdk_blob_get_id(blob);
5749 
5750 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5751 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5752 	CU_ASSERT(!spdk_blob_is_clone(blob));
5753 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
5754 
5755 	/* blob should not have underlying snapshot nor clones */
5756 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
5757 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5758 	count = SPDK_COUNTOF(ids);
5759 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5760 	CU_ASSERT(rc == 0);
5761 	CU_ASSERT(count == 0);
5762 
5763 
5764 	/* 2. Create snapshot */
5765 
5766 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5767 	poll_threads();
5768 	CU_ASSERT(g_bserrno == 0);
5769 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5770 	snapshotid = g_blobid;
5771 
5772 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5773 	poll_threads();
5774 	CU_ASSERT(g_bserrno == 0);
5775 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5776 	snapshot = g_blob;
5777 
5778 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
5779 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
5780 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
5781 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
5782 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5783 
5784 	/* Check if original blob is converted to the clone of snapshot */
5785 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5786 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5787 	CU_ASSERT(spdk_blob_is_clone(blob));
5788 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5789 	CU_ASSERT(blob->parent_id == snapshotid);
5790 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5791 
5792 	count = SPDK_COUNTOF(ids);
5793 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5794 	CU_ASSERT(rc == 0);
5795 	CU_ASSERT(count == 1);
5796 	CU_ASSERT(ids[0] == blobid);
5797 
5798 
5799 	/* 3. Create clone from snapshot */
5800 
5801 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
5802 	poll_threads();
5803 	CU_ASSERT(g_bserrno == 0);
5804 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5805 	cloneid = g_blobid;
5806 
5807 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5808 	poll_threads();
5809 	CU_ASSERT(g_bserrno == 0);
5810 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5811 	clone = g_blob;
5812 
5813 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5814 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5815 	CU_ASSERT(spdk_blob_is_clone(clone));
5816 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5817 	CU_ASSERT(clone->parent_id == snapshotid);
5818 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
5819 
5820 	count = SPDK_COUNTOF(ids);
5821 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5822 	CU_ASSERT(rc == 0);
5823 	CU_ASSERT(count == 0);
5824 
5825 	/* Check if clone is on the snapshot's list */
5826 	count = SPDK_COUNTOF(ids);
5827 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5828 	CU_ASSERT(rc == 0);
5829 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5830 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5831 
5832 
5833 	/* 4. Create snapshot of the clone */
5834 
5835 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5836 	poll_threads();
5837 	CU_ASSERT(g_bserrno == 0);
5838 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5839 	snapshotid2 = g_blobid;
5840 
5841 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5842 	poll_threads();
5843 	CU_ASSERT(g_bserrno == 0);
5844 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5845 	snapshot2 = g_blob;
5846 
5847 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
5848 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
5849 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5850 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5851 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5852 
5853 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
5854 	 * is a child of snapshot */
5855 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5856 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5857 	CU_ASSERT(spdk_blob_is_clone(clone));
5858 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5859 	CU_ASSERT(clone->parent_id == snapshotid2);
5860 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5861 
5862 	count = SPDK_COUNTOF(ids);
5863 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5864 	CU_ASSERT(rc == 0);
5865 	CU_ASSERT(count == 1);
5866 	CU_ASSERT(ids[0] == cloneid);
5867 
5868 
5869 	/* 5. Try to create clone from read only blob */
5870 
5871 	/* Mark blob as read only */
5872 	spdk_blob_set_read_only(blob);
5873 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5874 	poll_threads();
5875 	CU_ASSERT(g_bserrno == 0);
5876 
5877 	/* Check if previously created blob is read only clone */
5878 	CU_ASSERT(spdk_blob_is_read_only(blob));
5879 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5880 	CU_ASSERT(spdk_blob_is_clone(blob));
5881 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5882 
5883 	/* Create clone from read only blob */
5884 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5885 	poll_threads();
5886 	CU_ASSERT(g_bserrno == 0);
5887 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5888 	cloneid2 = g_blobid;
5889 
5890 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5891 	poll_threads();
5892 	CU_ASSERT(g_bserrno == 0);
5893 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5894 	clone2 = g_blob;
5895 
5896 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5897 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5898 	CU_ASSERT(spdk_blob_is_clone(clone2));
5899 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5900 
5901 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5902 
5903 	count = SPDK_COUNTOF(ids);
5904 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5905 	CU_ASSERT(rc == 0);
5906 
5907 	CU_ASSERT(count == 1);
5908 	CU_ASSERT(ids[0] == cloneid2);
5909 
5910 	/* Close blobs */
5911 
5912 	spdk_blob_close(clone2, blob_op_complete, NULL);
5913 	poll_threads();
5914 	CU_ASSERT(g_bserrno == 0);
5915 
5916 	spdk_blob_close(blob, blob_op_complete, NULL);
5917 	poll_threads();
5918 	CU_ASSERT(g_bserrno == 0);
5919 
5920 	spdk_blob_close(clone, blob_op_complete, NULL);
5921 	poll_threads();
5922 	CU_ASSERT(g_bserrno == 0);
5923 
5924 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5925 	poll_threads();
5926 	CU_ASSERT(g_bserrno == 0);
5927 
5928 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5929 	poll_threads();
5930 	CU_ASSERT(g_bserrno == 0);
5931 
5932 	/* Try to delete snapshot with more than 1 clone */
5933 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5934 	poll_threads();
5935 	CU_ASSERT(g_bserrno != 0);
5936 
5937 	ut_bs_reload(&bs, &bs_opts);
5938 
5939 	/* NULL ids array should return number of clones in count */
5940 	count = SPDK_COUNTOF(ids);
5941 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5942 	CU_ASSERT(rc == -ENOMEM);
5943 	CU_ASSERT(count == 2);
5944 
5945 	/* incorrect array size */
5946 	count = 1;
5947 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5948 	CU_ASSERT(rc == -ENOMEM);
5949 	CU_ASSERT(count == 2);
5950 
5951 
5952 	/* Verify structure of loaded blob store */
5953 
5954 	/* snapshot */
5955 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5956 
5957 	count = SPDK_COUNTOF(ids);
5958 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5959 	CU_ASSERT(rc == 0);
5960 	CU_ASSERT(count == 2);
5961 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5962 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5963 
5964 	/* blob */
5965 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5966 	count = SPDK_COUNTOF(ids);
5967 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5968 	CU_ASSERT(rc == 0);
5969 	CU_ASSERT(count == 1);
5970 	CU_ASSERT(ids[0] == cloneid2);
5971 
5972 	/* clone */
5973 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5974 	count = SPDK_COUNTOF(ids);
5975 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5976 	CU_ASSERT(rc == 0);
5977 	CU_ASSERT(count == 0);
5978 
5979 	/* snapshot2 */
5980 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5981 	count = SPDK_COUNTOF(ids);
5982 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5983 	CU_ASSERT(rc == 0);
5984 	CU_ASSERT(count == 1);
5985 	CU_ASSERT(ids[0] == cloneid);
5986 
5987 	/* clone2 */
5988 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5989 	count = SPDK_COUNTOF(ids);
5990 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5991 	CU_ASSERT(rc == 0);
5992 	CU_ASSERT(count == 0);
5993 
5994 	/* Try to delete blob that user should not be able to remove */
5995 
5996 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5997 	poll_threads();
5998 	CU_ASSERT(g_bserrno != 0);
5999 
6000 	/* Remove all blobs */
6001 
6002 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
6003 	poll_threads();
6004 	CU_ASSERT(g_bserrno == 0);
6005 
6006 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6007 	poll_threads();
6008 	CU_ASSERT(g_bserrno == 0);
6009 
6010 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
6011 	poll_threads();
6012 	CU_ASSERT(g_bserrno == 0);
6013 
6014 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6015 	poll_threads();
6016 	CU_ASSERT(g_bserrno == 0);
6017 
6018 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6019 	poll_threads();
6020 	CU_ASSERT(g_bserrno == 0);
6021 
6022 	spdk_bs_unload(bs, bs_op_complete, NULL);
6023 	poll_threads();
6024 	CU_ASSERT(g_bserrno == 0);
6025 
6026 	g_bs = NULL;
6027 }
6028 
6029 /**
6030  * Snapshot-clones relation test 2
6031  *
6032  *         snapshot1
6033  *            |
6034  *         snapshot2
6035  *            |
6036  *      +-----+-----+
6037  *      |           |
6038  *   blob(ro)   snapshot3
6039  *      |           |
6040  *      |       snapshot4
6041  *      |        |     |
6042  *   clone2   clone  clone3
6043  */
static void
blob_relations2(void)
{
	/* Builds the snapshot chain shown in the diagram above and verifies
	 * parent/clone bookkeeping across snapshot creation, deletion,
	 * blobstore reload, and out-of-order deletes. */
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
		     cloneid3;
	int rc;
	size_t count;
	spdk_blob_id ids[10] = {};

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* 1. Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* 2. Create snapshot1 */

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid1 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot1 = g_blob;

	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);

	CU_ASSERT(blob->parent_id == snapshotid1);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);

	/* Check if blob is the clone of snapshot1
	 * (repeats the two asserts above; harmless duplication) */
	CU_ASSERT(blob->parent_id == snapshotid1);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	/* 3. Create another snapshot */

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid2 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;

	CU_ASSERT(spdk_blob_is_clone(snapshot2));
	CU_ASSERT(snapshot2->parent_id == snapshotid1);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);

	/* Check if snapshot2 is the clone of snapshot1 and blob
	 * is a child of snapshot2 */
	CU_ASSERT(blob->parent_id == snapshotid2);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	/* 4. Create clone from snapshot */

	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;

	CU_ASSERT(clone->parent_id == snapshotid2);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);

	/* Check if clone is on the snapshot's list */
	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 2);
	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);

	/* 5. Create snapshot of the clone */

	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid3 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot3 = g_blob;

	CU_ASSERT(snapshot3->parent_id == snapshotid2);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);

	/* Check if clone is converted to the clone of snapshot3 and snapshot3
	 * is a child of snapshot2 */
	CU_ASSERT(clone->parent_id == snapshotid3);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid);

	/* 6. Create another snapshot of the clone */

	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid4 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot4 = g_blob;

	CU_ASSERT(snapshot4->parent_id == snapshotid3);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);

	/* Check if clone is converted to the clone of snapshot4 and snapshot4
	 * is a child of snapshot3 */
	CU_ASSERT(clone->parent_id == snapshotid4);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid);

	/* 7. Remove snapshot 4 */

	ut_blob_close_and_delete(bs, snapshot4);

	/* Check if relations are back to state from before creating snapshot 4 */
	CU_ASSERT(clone->parent_id == snapshotid3);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid);

	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */

	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid3 = g_blobid;

	/* Delete must fail: snapshot3 now has two clones (clone and clone3) */
	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */

	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot3 = g_blob;

	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	spdk_blob_close(snapshot3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 10. Remove snapshot 1 */

	/* Check snapshot 1 and snapshot 2 allocated clusters */
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot1) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 0);

	ut_blob_close_and_delete(bs, snapshot1);

	/* Check that snapshot 2 became the root of the chain after the
	 * removal of snapshot 1 */
	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);

	/* Check that snapshot 2 has the clusters that were allocated to snapshot 1 */
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 10);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 2);
	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);

	/* 11. Try to create clone from read only blob */

	/* Mark blob as read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid2 = g_blobid;

	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone2 = g_blob;

	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid2);

	/* Close blobs */

	spdk_blob_close(clone2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, &bs_opts);

	/* Verify structure of loaded blob store */

	/* snapshot2 */
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 2);
	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);

	/* blob */
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid2);

	/* clone */
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 0);

	/* snapshot3 */
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid);

	/* clone2 */
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 0);

	/* Try to delete all blobs in the worse possible order */

	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bs = NULL;
}
6414 }
6415 
6416 /**
6417  * Snapshot-clones relation test 3
6418  *
6419  *         snapshot0
6420  *            |
6421  *         snapshot1
6422  *            |
6423  *         snapshot2
6424  *            |
6425  *           blob
6426  */
static void
blob_relations3(void)
{
	/* Builds the chain pictured above (snapshot0 -> snapshot1 -> snapshot2 -> blob),
	 * decouples both the leaf blob and snapshot2 from their parents, and verifies
	 * that every blob can afterwards be deleted and the blobstore unloaded cleanly. */
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_io_channel *channel;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Decoupling requires an I/O channel to copy cluster data through. */
	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* 1. Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* 2. Create snapshot0 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid0 = g_blobid;

	/* 3. Create snapshot1 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid1 = g_blobid;

	/* 4. Create snapshot2 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid2 = g_blobid;

	/* 5. Decouple blob */
	/* NOTE(review): decouple_parent detaches blobid from snapshot2, presumably
	 * re-parenting it one level up the chain — confirm against blobstore docs. */
	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 7. Delete blob */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 8. Delete snapshot2.
	 * If md of snapshot 2 was updated, it should be possible to delete it */
	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Remove remaining blobs and unload bs */
	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bs = NULL;
}
6522 
/* Inject a simulated power failure after an increasing number of device writes
 * during blob md sync, until one sync finally completes.  Verifies that a sync
 * from a "clean" blobstore always clears the clean flag on disk first, and that
 * the rest of the super block is never modified, regardless of where the
 * failure hits. */
static void
blobstore_clean_power_failure(void)
{
	struct spdk_blob_store *bs;
	struct spdk_blob *blob;
	struct spdk_power_failure_thresholds thresholds = {};
	bool clean = false;
	/* The super block lives at offset 0 of the in-memory test device buffer. */
	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	struct spdk_bs_super_block super_copy = {};

	thresholds.general_threshold = 1;
	while (!clean) {
		/* Create bs and blob */
		suite_blob_setup();
		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		bs = g_bs;
		blob = g_blob;

		/* Super block should not change for rest of the UT,
		 * save it and compare later. */
		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
		SPDK_CU_ASSERT_FATAL(super->clean == 0);
		SPDK_CU_ASSERT_FATAL(bs->clean == 0);

		/* Force bs/super block in a clean state.
		 * Along with marking blob dirty, to cause blob persist. */
		blob->state = SPDK_BLOB_STATE_DIRTY;
		bs->clean = 1;
		super->clean = 1;
		super->crc = blob_md_page_calc_crc(super);

		g_bserrno = -1;
		dev_set_power_failure_thresholds(thresholds);
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		dev_reset_power_failure_event();

		if (g_bserrno == 0) {
			/* After successful md sync, both bs and super block
			 * should be marked as not clean. */
			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
			SPDK_CU_ASSERT_FATAL(super->clean == 0);
			clean = true;
		}

		/* Depending on the point of failure, super block was either updated or not. */
		super_copy.clean = super->clean;
		super_copy.crc = blob_md_page_calc_crc(&super_copy);
		/* Compare that the values in super block remained unchanged. */
		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));

		/* Delete blob and unload bs */
		suite_blob_cleanup();

		/* Let the next iteration survive one more device write before failing. */
		thresholds.general_threshold++;
	}
}
6581 
/* Repeatedly delete a snapshot while injecting a simulated power failure after an
 * increasing number of device writes.  After each failure the blobstore is
 * dirty-loaded and the on-disk state is checked for consistency: either the
 * deletion fully took effect, or the snapshot/clone relation is still intact.
 * The loop ends once the delete both reports success and the snapshot is gone. */
static void
blob_delete_snapshot_power_failure(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_blob_store *bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	struct spdk_power_failure_thresholds thresholds = {};
	spdk_blob_id blobid, snapshotid;
	const void *value;
	size_t value_len;
	size_t count;
	spdk_blob_id ids[3] = {};
	int rc;
	bool deleted = false;
	int delete_snapshot_bserrno = -1;

	thresholds.general_threshold = 1;
	while (!deleted) {
		dev = init_dev();

		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
		bs = g_bs;

		/* Create blob */
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 10;

		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		blobid = g_blobid;

		/* Create snapshot */
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		snapshotid = g_blobid;
		/* Snapshot owns the 10 data clusters; cluster 0 is md, so bits 1..10 are set. */
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));

		dev_set_power_failure_thresholds(thresholds);

		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
		poll_threads();
		delete_snapshot_bserrno = g_bserrno;

		/* Do not shut down cleanly. Assumption is that after snapshot deletion
		 * reports success, changes to both blobs should already persisted. */
		dev_reset_power_failure_event();
		ut_bs_dirty_load(&bs, NULL);

		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;
		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();

		if (g_bserrno == 0) {
			/* Snapshot survived: the full parent/clone relation must be intact
			 * and no pending-removal marker may remain. */
			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
			snapshot = g_blob;
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
			count = SPDK_COUNTOF(ids);
			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
			CU_ASSERT(rc == 0);
			CU_ASSERT(count == 1);
			CU_ASSERT(ids[0] == blobid);
			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
			CU_ASSERT(rc != 0);
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);

			spdk_blob_close(snapshot, blob_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
		} else {
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
			/* Snapshot might have been left in unrecoverable state, so it does not open.
			 * Yet delete might perform further changes to the clone after that.
			 * This UT should test until snapshot is deleted and delete call succeeds. */
			if (delete_snapshot_bserrno == 0) {
				deleted = true;
			}
		}

		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		spdk_bs_unload(bs, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Allow one more device write in the next iteration before failing. */
		thresholds.general_threshold++;
	}
}
6692 
6693 static void
6694 blob_create_snapshot_power_failure(void)
6695 {
6696 	struct spdk_blob_store *bs = g_bs;
6697 	struct spdk_bs_dev *dev;
6698 	struct spdk_blob_opts opts;
6699 	struct spdk_blob *blob, *snapshot;
6700 	struct spdk_power_failure_thresholds thresholds = {};
6701 	spdk_blob_id blobid, snapshotid;
6702 	const void *value;
6703 	size_t value_len;
6704 	size_t count;
6705 	spdk_blob_id ids[3] = {};
6706 	int rc;
6707 	bool created = false;
6708 	int create_snapshot_bserrno = -1;
6709 
6710 	thresholds.general_threshold = 1;
6711 	while (!created) {
6712 		dev = init_dev();
6713 
6714 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
6715 		poll_threads();
6716 		CU_ASSERT(g_bserrno == 0);
6717 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6718 		bs = g_bs;
6719 
6720 		/* Create blob */
6721 		ut_spdk_blob_opts_init(&opts);
6722 		opts.num_clusters = 10;
6723 
6724 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
6725 		poll_threads();
6726 		CU_ASSERT(g_bserrno == 0);
6727 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6728 		blobid = g_blobid;
6729 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6730 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6731 
6732 		dev_set_power_failure_thresholds(thresholds);
6733 
6734 		/* Create snapshot */
6735 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6736 		poll_threads();
6737 		create_snapshot_bserrno = g_bserrno;
6738 		snapshotid = g_blobid;
6739 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6740 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6741 
6742 		/* Do not shut down cleanly. Assumption is that after create snapshot
6743 		 * reports success, both blobs should be power-fail safe. */
6744 		dev_reset_power_failure_event();
6745 		ut_bs_dirty_load(&bs, NULL);
6746 
6747 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6748 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6749 
6750 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6751 		poll_threads();
6752 		CU_ASSERT(g_bserrno == 0);
6753 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6754 		blob = g_blob;
6755 
6756 		if (snapshotid != SPDK_BLOBID_INVALID) {
6757 			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6758 			poll_threads();
6759 		}
6760 
6761 		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
6762 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6763 			snapshot = g_blob;
6764 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
6765 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
6766 			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
6767 			CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);
6768 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6769 			count = SPDK_COUNTOF(ids);
6770 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6771 			CU_ASSERT(rc == 0);
6772 			CU_ASSERT(count == 1);
6773 			CU_ASSERT(ids[0] == blobid);
6774 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
6775 			CU_ASSERT(rc != 0);
6776 
6777 			spdk_blob_close(snapshot, blob_op_complete, NULL);
6778 			poll_threads();
6779 			CU_ASSERT(g_bserrno == 0);
6780 			if (create_snapshot_bserrno == 0) {
6781 				created = true;
6782 			}
6783 		} else {
6784 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
6785 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
6786 			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
6787 		}
6788 
6789 		spdk_blob_close(blob, blob_op_complete, NULL);
6790 		poll_threads();
6791 		CU_ASSERT(g_bserrno == 0);
6792 
6793 		spdk_bs_unload(bs, bs_op_complete, NULL);
6794 		poll_threads();
6795 		CU_ASSERT(g_bserrno == 0);
6796 
6797 		thresholds.general_threshold++;
6798 	}
6799 }
6800 
6801 static void
6802 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6803 {
6804 	uint8_t payload_ff[64 * 512];
6805 	uint8_t payload_aa[64 * 512];
6806 	uint8_t payload_00[64 * 512];
6807 	uint8_t *cluster0, *cluster1;
6808 
6809 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6810 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6811 	memset(payload_00, 0x00, sizeof(payload_00));
6812 
6813 	/* Try to perform I/O with io unit = 512 */
6814 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
6815 	poll_threads();
6816 	CU_ASSERT(g_bserrno == 0);
6817 
6818 	/* If thin provisioned is set cluster should be allocated now */
6819 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6820 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6821 
6822 	/* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
6823 	* Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
6824 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6825 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6826 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6827 
6828 	/* Verify write with offset on first page */
6829 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
6830 	poll_threads();
6831 	CU_ASSERT(g_bserrno == 0);
6832 
6833 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6834 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6835 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6836 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6837 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6838 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6839 
6840 	/* Verify write with offset on first page */
6841 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
6842 	poll_threads();
6843 
6844 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6845 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6846 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6847 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6848 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6849 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6850 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6851 
6852 	/* Verify write with offset on second page */
6853 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
6854 	poll_threads();
6855 
6856 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6857 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6858 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6859 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6860 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6861 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6862 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6863 
6864 	/* Verify write across multiple pages */
6865 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
6866 	poll_threads();
6867 
6868 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6869 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6870 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6871 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6872 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6873 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6874 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6875 
6876 	/* Verify write across multiple clusters */
6877 	spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
6878 	poll_threads();
6879 
6880 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6881 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6882 
6883 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6884 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6885 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6886 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6887 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6888 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6889 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6890 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6891 
6892 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6893 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6894 
6895 	/* Verify write to second cluster */
6896 	spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
6897 	poll_threads();
6898 
6899 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6900 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6901 
6902 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6903 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6904 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6905 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6906 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6907 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6908 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6909 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6910 
6911 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6912 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6913 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6914 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6915 }
6916 
6917 static void
6918 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6919 {
6920 	uint8_t payload_read[64 * 512];
6921 	uint8_t payload_ff[64 * 512];
6922 	uint8_t payload_aa[64 * 512];
6923 	uint8_t payload_00[64 * 512];
6924 
6925 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6926 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6927 	memset(payload_00, 0x00, sizeof(payload_00));
6928 
6929 	/* Read only first io unit */
6930 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6931 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6932 	 * payload_read: F000 0000 | 0000 0000 ... */
6933 	memset(payload_read, 0x00, sizeof(payload_read));
6934 	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
6935 	poll_threads();
6936 	CU_ASSERT(g_bserrno == 0);
6937 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6938 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6939 
6940 	/* Read four io_units starting from offset = 2
6941 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6942 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6943 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6944 
6945 	memset(payload_read, 0x00, sizeof(payload_read));
6946 	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
6947 	poll_threads();
6948 	CU_ASSERT(g_bserrno == 0);
6949 
6950 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6951 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6952 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6953 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6954 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6955 
6956 	/* Read eight io_units across multiple pages
6957 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6958 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6959 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6960 	memset(payload_read, 0x00, sizeof(payload_read));
6961 	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
6962 	poll_threads();
6963 	CU_ASSERT(g_bserrno == 0);
6964 
6965 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6966 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6967 
6968 	/* Read eight io_units across multiple clusters
6969 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6970 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6971 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6972 	memset(payload_read, 0x00, sizeof(payload_read));
6973 	spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
6974 	poll_threads();
6975 	CU_ASSERT(g_bserrno == 0);
6976 
6977 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6978 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6979 
6980 	/* Read four io_units from second cluster
6981 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6982 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6983 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6984 	memset(payload_read, 0x00, sizeof(payload_read));
6985 	spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
6986 	poll_threads();
6987 	CU_ASSERT(g_bserrno == 0);
6988 
6989 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6990 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6991 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6992 
6993 	/* Read second cluster
6994 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6995 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6996 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6997 	memset(payload_read, 0x00, sizeof(payload_read));
6998 	spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
6999 	poll_threads();
7000 	CU_ASSERT(g_bserrno == 0);
7001 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
7002 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
7003 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
7004 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
7005 
7006 	/* Read whole two clusters
7007 	 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
7008 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
7009 	memset(payload_read, 0x00, sizeof(payload_read));
7010 	spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
7011 	poll_threads();
7012 	CU_ASSERT(g_bserrno == 0);
7013 
7014 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
7015 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
7016 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
7017 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
7018 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
7019 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
7020 
7021 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
7022 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
7023 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
7024 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
7025 }
7026 
7027 
7028 static void
7029 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
7030 {
7031 	uint8_t payload_ff[64 * 512];
7032 	uint8_t payload_aa[64 * 512];
7033 	uint8_t payload_00[64 * 512];
7034 	uint8_t *cluster0, *cluster1;
7035 
7036 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7037 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7038 	memset(payload_00, 0x00, sizeof(payload_00));
7039 
7040 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7041 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7042 
7043 	/* Unmap */
7044 	spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
7045 	poll_threads();
7046 
7047 	CU_ASSERT(g_bserrno == 0);
7048 
7049 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
7050 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
7051 }
7052 
7053 static void
7054 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
7055 {
7056 	uint8_t payload_ff[64 * 512];
7057 	uint8_t payload_aa[64 * 512];
7058 	uint8_t payload_00[64 * 512];
7059 	uint8_t *cluster0, *cluster1;
7060 
7061 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7062 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7063 	memset(payload_00, 0x00, sizeof(payload_00));
7064 
7065 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7066 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7067 
7068 	/* Write zeroes  */
7069 	spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
7070 	poll_threads();
7071 
7072 	CU_ASSERT(g_bserrno == 0);
7073 
7074 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
7075 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
7076 }
7077 
7078 static inline void
7079 test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
7080 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
7081 		    spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
7082 {
7083 	if (io_opts) {
7084 		g_dev_writev_ext_called = false;
7085 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
7086 		spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL,
7087 					io_opts);
7088 	} else {
7089 		spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
7090 	}
7091 	poll_threads();
7092 	CU_ASSERT(g_bserrno == 0);
7093 	if (io_opts) {
7094 		CU_ASSERT(g_dev_writev_ext_called);
7095 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
7096 	}
7097 }
7098 
7099 static void
7100 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
7101 	       bool ext_api)
7102 {
7103 	uint8_t payload_ff[64 * 512];
7104 	uint8_t payload_aa[64 * 512];
7105 	uint8_t payload_00[64 * 512];
7106 	uint8_t *cluster0, *cluster1;
7107 	struct iovec iov[4];
7108 	struct spdk_blob_ext_io_opts ext_opts = {
7109 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
7110 		.memory_domain_ctx = (void *)0xf00df00d,
7111 		.size = sizeof(struct spdk_blob_ext_io_opts),
7112 		.user_ctx = (void *)123,
7113 	};
7114 
7115 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7116 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7117 	memset(payload_00, 0x00, sizeof(payload_00));
7118 
7119 	/* Try to perform I/O with io unit = 512 */
7120 	iov[0].iov_base = payload_ff;
7121 	iov[0].iov_len = 1 * 512;
7122 
7123 	test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
7124 			    ext_api ? &ext_opts : NULL);
7125 
7126 	/* If thin provisioned is set cluster should be allocated now */
7127 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
7128 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7129 
7130 	/* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
7131 	* Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
7132 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7133 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7134 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
7135 
7136 	/* Verify write with offset on first page */
7137 	iov[0].iov_base = payload_ff;
7138 	iov[0].iov_len = 1 * 512;
7139 
7140 	test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
7141 			    ext_api ? &ext_opts : NULL);
7142 
7143 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7144 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7145 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7146 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7147 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7148 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
7149 
7150 	/* Verify write with offset on first page */
7151 	iov[0].iov_base = payload_ff;
7152 	iov[0].iov_len = 4 * 512;
7153 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
7154 	poll_threads();
7155 
7156 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
7157 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7158 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7159 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7160 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7161 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
7162 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
7163 
7164 	/* Verify write with offset on second page */
7165 	iov[0].iov_base = payload_ff;
7166 	iov[0].iov_len = 4 * 512;
7167 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
7168 	poll_threads();
7169 
7170 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
7171 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7172 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7173 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7174 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7175 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
7176 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
7177 
7178 	/* Verify write across multiple pages */
7179 	iov[0].iov_base = payload_aa;
7180 	iov[0].iov_len = 8 * 512;
7181 
7182 	test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
7183 			    ext_api ? &ext_opts : NULL);
7184 
7185 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
7186 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7187 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7188 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7189 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7190 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7191 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
7192 
7193 	/* Verify write across multiple clusters */
7194 
7195 	iov[0].iov_base = payload_ff;
7196 	iov[0].iov_len = 8 * 512;
7197 
7198 	test_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL,
7199 			    ext_api ? &ext_opts : NULL);
7200 
7201 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
7202 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7203 
7204 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7205 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7206 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7207 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7208 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7209 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7210 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7211 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
7212 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
7213 
7214 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7215 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
7216 
7217 	/* Verify write to second cluster */
7218 
7219 	iov[0].iov_base = payload_ff;
7220 	iov[0].iov_len = 2 * 512;
7221 
7222 	test_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL,
7223 			    ext_api ? &ext_opts : NULL);
7224 
7225 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
7226 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7227 
7228 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7229 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
7230 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7231 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7232 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7233 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7234 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7235 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
7236 
7237 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7238 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
7239 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
7240 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
7241 }
7242 
7243 static inline void
7244 test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
7245 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
7246 		   spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
7247 {
7248 	if (io_opts) {
7249 		g_dev_readv_ext_called = false;
7250 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
7251 		spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL, io_opts);
7252 	} else {
7253 		spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
7254 	}
7255 	poll_threads();
7256 	CU_ASSERT(g_bserrno == 0);
7257 	if (io_opts) {
7258 		CU_ASSERT(g_dev_readv_ext_called);
7259 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
7260 	}
7261 }
7262 
/*
 * Read back, via iovecs, the pattern left on the blob by test_iov_write() and
 * verify it io_unit by io_unit.  The expected layout (512-byte io_units,
 * 32 per cluster) is:
 *   cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
 *   cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
 * When ext_api is true, the readv_ext path is exercised with ext_opts and the
 * helper asserts the options were propagated down to the bs_dev.
 */
static void
test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
	      bool ext_api)
{
	uint8_t payload_read[64 * 512];
	uint8_t payload_ff[64 * 512];
	uint8_t payload_aa[64 * 512];
	uint8_t payload_00[64 * 512];
	struct iovec iov[4];
	/* Sentinel pointer/context values: never dereferenced, only compared
	 * after the I/O to prove the opts reached the bs_dev unchanged. */
	struct spdk_blob_ext_io_opts ext_opts = {
		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
		.memory_domain_ctx = (void *)0xf00df00d,
		.size = sizeof(struct spdk_blob_ext_io_opts),
		.user_ctx = (void *)123,
	};

	memset(payload_ff, 0xFF, sizeof(payload_ff));
	memset(payload_aa, 0xAA, sizeof(payload_aa));
	memset(payload_00, 0x00, sizeof(payload_00));

	/* Read only first io unit */
	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F000 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;

	test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);

	/* Read four io_units starting from offset = 2
	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F0AA 0000 | 0000 0000 ... */

	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 4 * 512;

	test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);

	/* Read eight io_units across multiple pages
	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: AAAA AAAA | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 4 * 512;
	iov[1].iov_base = payload_read + 4 * 512;
	iov[1].iov_len = 4 * 512;

	test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);

	/* Read eight io_units across multiple clusters
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: FFFF FFFF | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 2 * 512;
	iov[1].iov_base = payload_read + 2 * 512;
	iov[1].iov_len = 2 * 512;
	iov[2].iov_base = payload_read + 4 * 512;
	iov[2].iov_len = 2 * 512;
	iov[3].iov_base = payload_read + 6 * 512;
	iov[3].iov_len = 2 * 512;

	test_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);

	/* Read four io_units from second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
	 * payload_read: 00FF 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 3 * 512;

	test_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);

	/* Read second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
	 * payload_read: FFFF 0000 | 0000 FF00 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 2 * 512;
	iov[2].iov_base = payload_read + 3 * 512;
	iov[2].iov_len = 4 * 512;
	iov[3].iov_base = payload_read + 7 * 512;
	iov[3].iov_len = 25 * 512;

	test_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);

	/* Read whole two clusters
	 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 8 * 512;
	iov[2].iov_base = payload_read + 9 * 512;
	iov[2].iov_len = 16 * 512;
	iov[3].iov_base = payload_read + 25 * 512;
	iov[3].iov_len = 39 * 512;

	test_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);

	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
}
7415 
/*
 * Exercise blob I/O with a 512-byte io_unit (smaller than the 4KiB metadata
 * page) on thick-provisioned, thin-provisioned, snapshot, clone and inflated
 * blobs, using both the basic and the _ext I/O APIs.
 */
static void
blob_io_unit(void)
{
	struct spdk_bs_opts bsopts;
	struct spdk_blob_opts opts;
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid;
	struct spdk_io_channel *channel;

	/* Create dev with 512 bytes io unit size */

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");

	/* Back the blob store with a dev whose block size is 512 bytes */
	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* io_unit size must follow the dev block size */
	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
	channel = spdk_bs_alloc_io_channel(bs);

	/* Create thick provisioned blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 32;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	test_io_write(dev, blob, channel);
	test_io_read(dev, blob, channel);
	test_io_zeroes(dev, blob, channel);

	/* Repeat with iovec-based I/O, first the basic then the _ext API */
	test_iov_write(dev, blob, channel, false);
	test_iov_read(dev, blob, channel, false);
	test_io_zeroes(dev, blob, channel);

	test_iov_write(dev, blob, channel, true);
	test_iov_read(dev, blob, channel, true);

	test_io_unmap(dev, blob, channel);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	/* Create thin provisioned blob */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 32;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	test_io_write(dev, blob, channel);
	test_io_read(dev, blob, channel);
	test_io_zeroes(dev, blob, channel);

	test_iov_write(dev, blob, channel, false);
	test_iov_read(dev, blob, channel, false);
	test_io_zeroes(dev, blob, channel);

	test_iov_write(dev, blob, channel, true);
	test_iov_read(dev, blob, channel, true);

	/* Create snapshot */

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	snapshot = g_blob;

	/* Clone the snapshot; blobid now refers to the clone */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	clone = g_blob;

	/* All three must read back the same data written before the snapshot */
	test_io_read(dev, blob, channel);
	test_io_read(dev, snapshot, channel);
	test_io_read(dev, clone, channel);

	test_iov_read(dev, blob, channel, false);
	test_iov_read(dev, snapshot, channel, false);
	test_iov_read(dev, clone, channel, false);

	test_iov_read(dev, blob, channel, true);
	test_iov_read(dev, snapshot, channel, true);
	test_iov_read(dev, clone, channel, true);

	/* Inflate clone */

	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	poll_threads();

	CU_ASSERT(g_bserrno == 0);

	/* Data must survive inflation, and the now-independent clone must
	 * still support the full set of I/O operations */
	test_io_read(dev, clone, channel);

	test_io_unmap(dev, clone, channel);

	test_iov_write(dev, clone, channel, false);
	test_iov_read(dev, clone, channel, false);
	test_io_unmap(dev, clone, channel);

	test_iov_write(dev, clone, channel, true);
	test_iov_read(dev, clone, channel, true);

	spdk_blob_close(blob, blob_op_complete, NULL);
	spdk_blob_close(snapshot, blob_op_complete, NULL);
	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	spdk_bs_free_io_channel(channel);
	poll_threads();

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;
}
7570 
/*
 * Verify backward compatibility with on-disk metadata written before the
 * io_unit_size super-block field existed: when io_unit_size reads as 0,
 * the blob store must fall back to SPDK_BS_PAGE_SIZE as the io_unit size.
 */
static void
blob_io_unit_compatibility(void)
{
	struct spdk_bs_opts bsopts;
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super;

	/* Create dev with 512 bytes io unit size */

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");

	/* Back the blob store with a dev whose block size is 512 bytes */
	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Modify super block to behave like older version.
	 * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */
	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	super->io_unit_size = 0;
	/* Recompute the CRC so the doctored super block still passes validation */
	super->crc = blob_md_page_calc_crc(super);

	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;

	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* With io_unit_size == 0 on disk, the load path must fall back to the
	 * legacy page-sized io_unit */
	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;
}
7631 
7632 static void
7633 first_sync_complete(void *cb_arg, int bserrno)
7634 {
7635 	struct spdk_blob *blob = cb_arg;
7636 	int rc;
7637 
7638 	CU_ASSERT(bserrno == 0);
7639 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
7640 	CU_ASSERT(rc == 0);
7641 	CU_ASSERT(g_bserrno == -1);
7642 
7643 	/* Keep g_bserrno at -1, only the
7644 	 * second sync completion should set it at 0. */
7645 }
7646 
7647 static void
7648 second_sync_complete(void *cb_arg, int bserrno)
7649 {
7650 	struct spdk_blob *blob = cb_arg;
7651 	const void *value;
7652 	size_t value_len;
7653 	int rc;
7654 
7655 	CU_ASSERT(bserrno == 0);
7656 
7657 	/* Verify that the first sync completion had a chance to execute */
7658 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
7659 	CU_ASSERT(rc == 0);
7660 	SPDK_CU_ASSERT_FATAL(value != NULL);
7661 	CU_ASSERT(value_len == strlen("second") + 1);
7662 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
7663 
7664 	CU_ASSERT(g_bserrno == -1);
7665 	g_bserrno = bserrno;
7666 }
7667 
/*
 * Start pairs of conflicting blob operations without polling in between and
 * verify that the second (a delete) fails immediately with -EBUSY while the
 * first (snapshot / inflate / clone / resize) completes successfully.
 * Finally, issue two overlapping md syncs and check both complete in order.
 */
static void
blob_simultaneous_operations(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	int rc;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot and try to remove blob in the same time:
	 * - snapshot should be created successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Snapshot creation success */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Inflate blob and try to remove blob in the same time:
	 * - blob should be inflated successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Inflation success */
	CU_ASSERT(g_bserrno == 0);

	/* Clone snapshot and try to remove snapshot in the same time:
	 * - snapshot should be cloned successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Clone created */
	CU_ASSERT(g_bserrno == 0);

	/* Resize blob and try to remove blob in the same time:
	 * - blob should be resized successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Blob resized successfully */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Issue two consecutive blob syncs, neither should fail.
	 * Force sync to actually occur by marking blob dirty each time.
	 * Execution of sync should not be enough to complete the operation,
	 * since disk I/O is required to complete it. */
	g_bserrno = -1;

	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
	CU_ASSERT(rc == 0);
	spdk_blob_sync_md(blob, first_sync_complete, blob);
	CU_ASSERT(g_bserrno == -1);

	/* first_sync_complete re-dirties the blob, so this second sync has
	 * real metadata to write; second_sync_complete sets g_bserrno. */
	spdk_blob_sync_md(blob, second_sync_complete, blob);
	CU_ASSERT(g_bserrno == -1);

	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, snapshot);
	ut_blob_close_and_delete(bs, blob);
}
7779 
/*
 * Interrupt an in-flight md sync (which persists a multi-page xattr) at an
 * increasing number of poller steps, then remove the xattr and sync again.
 * After the second sync and a blob store reload, the xattr must be gone and
 * the md page accounting must be back at the clean baseline — i.e. a
 * partially completed sync must never leak pages or persist stale state.
 */
static void
blob_persist_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	struct spdk_io_channel *channel;
	char *xattr;
	size_t xattr_length;
	int rc;
	uint32_t page_count_clear, page_count_xattr;
	uint64_t poller_iterations;
	bool run_poller;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Save the amount of md pages used after creation of a blob.
	 * This should be consistent after removing xattr. */
	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);

	/* Add xattr with maximum length of descriptor to exceed single metadata page. */
	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
		       strlen("large_xattr");
	xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);

	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);

	/* Save the amount of md pages used after adding the large xattr */
	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);

	/* Add xattr to a blob and sync it. While sync is occurring, remove the xattr and sync again.
	 * Interrupt the first sync after increasing number of poller iterations, until it succeeds.
	 * Expectation is that after second sync completes no xattr is saved in metadata. */
	poller_iterations = 1;
	run_poller = true;
	while (run_poller) {
		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == 0);
		g_bserrno = -1;
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		/* Advance only a bounded number of poller steps so the sync may
		 * be left half-done on early iterations of this loop */
		poll_thread_times(0, poller_iterations);
		if (g_bserrno == 0) {
			/* Poller iteration count was high enough for first sync to complete.
			 * Verify that blob takes up enough of md_pages to store the xattr. */
			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
			run_poller = false;
		}
		rc = spdk_blob_remove_xattr(blob, "large_xattr");
		SPDK_CU_ASSERT_FATAL(rc == 0);
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);

		/* Reload bs and re-open blob to verify that xattr was not persisted. */
		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		ut_bs_reload(&bs, NULL);

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;

		/* NOTE: this reuses &xattr as the out-pointer; it is only safe
		 * because the call is required to fail with -ENOENT and thus
		 * leaves xattr (our calloc'd buffer) untouched. */
		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);

		poller_iterations++;
		/* Stop at high iteration count to prevent infinite loop.
		 * This value should be enough for first md sync to complete in any case. */
		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
	}

	free(xattr);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
7884 
/*
 * Build a chain blob -> snapshot2 -> snapshot1, decouple snapshot2 from its
 * parent, and verify it becomes a standalone blob whose clusters are fully
 * allocated and distinct from snapshot1's.  Run twice to cover both deletion
 * orders (snapshots first vs. blob first).
 */
static void
blob_decouple_snapshot(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot1, *snapshot2;
	struct spdk_io_channel *channel;
	spdk_blob_id blobid, snapshotid;
	uint64_t cluster;

	for (int delete_snapshot_first = 0; delete_snapshot_first <= 1; delete_snapshot_first++) {
		channel = spdk_bs_alloc_io_channel(bs);
		SPDK_CU_ASSERT_FATAL(channel != NULL);

		/* Thick provisioning ensures clusters exist to be copied on decouple */
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 10;
		opts.thin_provision = false;

		blob = ut_blob_create_and_open(bs, &opts);
		blobid = spdk_blob_get_id(blob);

		/* Create first snapshot */
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
		snapshotid = g_blobid;

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		snapshot1 = g_blob;

		/* Create the second one */
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
		snapshotid = g_blobid;

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		snapshot2 = g_blob;
		/* snapshot2 must sit between the blob and snapshot1 in the chain */
		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);

		/* Now decouple the second snapshot forcing it to copy the written clusters */
		spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Verify that the snapshot has been decoupled and that the clusters have been copied */
		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
		for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
					    snapshot1->active.clusters[cluster]);
		}

		spdk_bs_free_io_channel(channel);

		if (delete_snapshot_first) {
			ut_blob_close_and_delete(bs, snapshot2);
			ut_blob_close_and_delete(bs, snapshot1);
			ut_blob_close_and_delete(bs, blob);
		} else {
			ut_blob_close_and_delete(bs, blob);
			ut_blob_close_and_delete(bs, snapshot2);
			ut_blob_close_and_delete(bs, snapshot1);
		}
		poll_threads();
	}
}
7964 
7965 static void
7966 blob_seek_io_unit(void)
7967 {
7968 	struct spdk_blob_store *bs = g_bs;
7969 	struct spdk_blob *blob;
7970 	struct spdk_io_channel *channel;
7971 	struct spdk_blob_opts opts;
7972 	uint64_t free_clusters;
7973 	uint8_t payload[10 * 4096];
7974 	uint64_t offset;
7975 	uint64_t io_unit, io_units_per_cluster;
7976 
7977 	free_clusters = spdk_bs_free_cluster_count(bs);
7978 
7979 	channel = spdk_bs_alloc_io_channel(bs);
7980 	CU_ASSERT(channel != NULL);
7981 
7982 	/* Set blob as thin provisioned */
7983 	ut_spdk_blob_opts_init(&opts);
7984 	opts.thin_provision = true;
7985 
7986 	/* Create a blob */
7987 	blob = ut_blob_create_and_open(bs, &opts);
7988 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
7989 
7990 	io_units_per_cluster = bs_io_units_per_cluster(blob);
7991 
7992 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
7993 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
7994 	poll_threads();
7995 	CU_ASSERT(g_bserrno == 0);
7996 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
7997 	CU_ASSERT(blob->active.num_clusters == 5);
7998 
7999 	/* Write at the beginning of first cluster */
8000 	offset = 0;
8001 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
8002 	poll_threads();
8003 	CU_ASSERT(g_bserrno == 0);
8004 
8005 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 0);
8006 	CU_ASSERT(io_unit == offset);
8007 
8008 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 0);
8009 	CU_ASSERT(io_unit == io_units_per_cluster);
8010 
8011 	/* Write in the middle of third cluster */
8012 	offset = 2 * io_units_per_cluster + io_units_per_cluster / 2;
8013 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
8014 	poll_threads();
8015 	CU_ASSERT(g_bserrno == 0);
8016 
8017 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, io_units_per_cluster);
8018 	CU_ASSERT(io_unit == 2 * io_units_per_cluster);
8019 
8020 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 2 * io_units_per_cluster);
8021 	CU_ASSERT(io_unit == 3 * io_units_per_cluster);
8022 
8023 	/* Write at the end of last cluster */
8024 	offset = 5 * io_units_per_cluster - 1;
8025 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
8026 	poll_threads();
8027 	CU_ASSERT(g_bserrno == 0);
8028 
8029 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 3 * io_units_per_cluster);
8030 	CU_ASSERT(io_unit == 4 * io_units_per_cluster);
8031 
8032 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 4 * io_units_per_cluster);
8033 	CU_ASSERT(io_unit == UINT64_MAX);
8034 
8035 	spdk_bs_free_io_channel(channel);
8036 	poll_threads();
8037 
8038 	ut_blob_close_and_delete(bs, blob);
8039 }
8040 
/*
 * Exercise creation and reopening of esnap (external snapshot) clones:
 *   - a plain blob is not an esnap clone;
 *   - a blob created with esnap_id set is an esnap clone with the requested size;
 *   - an esnap clone created without a size starts at 0 clusters and can be grown;
 *   - reopening after a blobstore reload works only when bs_opts.esnap_bs_dev_create is set;
 *   - esnap_ctx from bs_opts and open_opts is passed through to the create callback.
 */
static void
blob_esnap_create(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_bs_opts	bs_opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob_opts	opts;
	struct spdk_blob_open_opts open_opts;
	struct spdk_blob	*blob;
	uint32_t		cluster_sz, block_sz;
	const uint32_t		esnap_num_clusters = 4;
	uint64_t		esnap_num_blocks;
	uint32_t		sz;
	spdk_blob_id		blobid;
	uint32_t		bs_ctx_count, blob_ctx_count;

	cluster_sz = spdk_bs_get_cluster_size(bs);
	block_sz = spdk_bs_get_io_unit_size(bs);
	/* Size the esnap device to exactly esnap_num_clusters worth of blobstore clusters. */
	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;

	/* Create a normal blob and verify it is not an esnap clone. */
	ut_spdk_blob_opts_init(&opts);
	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(!spdk_blob_is_esnap_clone(blob));
	ut_blob_close_and_delete(bs, blob);

	/* Create an esnap clone blob then verify it is an esnap clone and has the right size */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	SPDK_CU_ASSERT_FATAL(blob_is_esnap_clone(blob));
	/* An esnap clone is not reported as a regular (blob-backed) clone. */
	SPDK_CU_ASSERT_FATAL(!spdk_blob_is_clone(blob));
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters);
	ut_blob_close_and_delete(bs, blob);

	/* Create an esnap clone without the size and verify it can be grown */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == 0);
	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == 1);
	spdk_blob_resize(blob, esnap_num_clusters, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters);
	/* Growing past the esnap device size is also allowed. */
	spdk_blob_resize(blob, esnap_num_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters + 1);

	/* Reload the blobstore and be sure that the blob can be opened. */
	blobid = spdk_blob_get_id(blob);
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	ut_bs_reload(&bs, &bs_opts);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters + 1);

	/* Reload the blobstore without esnap_bs_dev_create: should fail to open blob. */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	ut_bs_reload(&bs, &bs_opts);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	/* Reload the blobstore with ctx set and verify it is passed to the esnap create callback */
	bs_ctx_count = 0;
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create_with_count;
	bs_opts.esnap_ctx = &bs_ctx_count;
	ut_bs_reload(&bs, &bs_opts);
	/* Loading the blobstore triggers the esnap to be loaded */
	CU_ASSERT(bs_ctx_count == 1);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	/* Opening the blob also triggers the esnap to be loaded */
	CU_ASSERT(bs_ctx_count == 2);
	blob = g_blob;
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters + 1);
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
	/* If open_opts.esnap_ctx is set it is passed to the esnap create callback */
	blob_ctx_count = 0;
	spdk_blob_open_opts_init(&open_opts, sizeof(open_opts));
	open_opts.esnap_ctx = &blob_ctx_count;
	spdk_bs_open_blob_ext(bs, blobid, &open_opts, blob_op_with_handle_complete, NULL);
	poll_threads();
	blob = g_blob;
	CU_ASSERT(bs_ctx_count == 3);
	CU_ASSERT(blob_ctx_count == 1);
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
}
8173 
/*
 * Build a three-deep chain (esnap device <- esnap clone <- snapshot <- regular clone), reload
 * the blobstore, reopen all three blobs, perform reads so per-channel esnap state is created,
 * then unload the blobstore the way lvstore does (close + unload issued before polling) to
 * exercise the deferred unload path in spdk_bs_unload().
 */
static void
blob_esnap_clone_reload(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_bs_opts	bs_opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob_opts	opts;
	struct spdk_blob	*eclone1, *snap1, *clone1;
	uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
	uint32_t		block_sz = spdk_bs_get_io_unit_size(bs);
	const uint32_t		esnap_num_clusters = 4;
	uint64_t		esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
	spdk_blob_id		eclone1_id, snap1_id, clone1_id;
	struct spdk_io_channel	*bs_ch;
	char			buf[block_sz];
	int			bserr1, bserr2, bserr3, bserr4;
	struct spdk_bs_dev	*dev;

	/* Create and open an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	eclone1 = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(eclone1 != NULL);
	CU_ASSERT(spdk_blob_is_esnap_clone(eclone1));
	eclone1_id = eclone1->id;

	/* Create and open a snapshot of eclone1 */
	spdk_bs_create_snapshot(bs, eclone1_id, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT(g_bserrno == 0);
	snap1_id = g_blobid;
	spdk_bs_open_blob(bs, snap1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	snap1 = g_blob;

	/* Create and open regular clone of snap1 */
	spdk_bs_create_clone(bs, snap1_id, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	clone1_id = g_blobid;
	spdk_bs_open_blob(bs, clone1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	clone1 = g_blob;

	/* Close the blobs in preparation for reloading the blobstore */
	spdk_blob_close(clone1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_close(snap1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_close(eclone1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;

	/* Reload the blobstore */
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	ut_bs_reload(&bs, &bs_opts);

	/* Be sure each of the blobs can be opened */
	spdk_bs_open_blob(bs, eclone1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	eclone1 = g_blob;
	spdk_bs_open_blob(bs, snap1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	snap1 = g_blob;
	spdk_bs_open_blob(bs, clone1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	clone1 = g_blob;

	/* Perform some reads on each of them to cause channels to be allocated */
	bs_ch = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(bs_ch != NULL);
	spdk_blob_io_read(eclone1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_io_read(snap1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_io_read(clone1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Unload the blobstore in a way similar to how lvstore unloads it.  This should exercise
	 * the deferred unload path in spdk_bs_unload().
	 */
	/* Sentinel values verify that every completion callback actually ran. */
	bserr1 = 0xbad;
	bserr2 = 0xbad;
	bserr3 = 0xbad;
	bserr4 = 0xbad;
	spdk_blob_close(eclone1, blob_op_complete, &bserr1);
	spdk_blob_close(snap1, blob_op_complete, &bserr2);
	spdk_blob_close(clone1, blob_op_complete, &bserr3);
	spdk_bs_unload(bs, blob_op_complete, &bserr4);
	spdk_bs_free_io_channel(bs_ch);
	poll_threads();
	CU_ASSERT(bserr1 == 0);
	CU_ASSERT(bserr2 == 0);
	CU_ASSERT(bserr3 == 0);
	CU_ASSERT(bserr4 == 0);
	g_blob = NULL;

	/* Reload the blobstore */
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	dev = init_dev();
	spdk_bs_load(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	/* The reloaded blobstore is left in g_bs; presumably the suite teardown
	 * unloads it — confirm against the test harness. */
}
8303 
8304 static bool
8305 blob_esnap_verify_contents(struct spdk_blob *blob, struct spdk_io_channel *ch,
8306 			   uint64_t offset, uint64_t size, uint32_t readsize, const char *how)
8307 {
8308 	const uint32_t	bs_blksz = blob->bs->io_unit_size;
8309 	const uint32_t	esnap_blksz = blob->back_bs_dev ? blob->back_bs_dev->blocklen : bs_blksz;
8310 	const uint32_t	start_blk = offset / bs_blksz;
8311 	const uint32_t	num_blocks = spdk_max(size, readsize) / bs_blksz;
8312 	const uint32_t	blocks_per_read = spdk_min(size, readsize) / bs_blksz;
8313 	uint32_t	blob_block;
8314 	struct iovec	iov;
8315 	uint8_t		buf[spdk_min(size, readsize)];
8316 	bool		block_ok;
8317 
8318 	SPDK_CU_ASSERT_FATAL(offset % bs_blksz == 0);
8319 	SPDK_CU_ASSERT_FATAL(size % bs_blksz == 0);
8320 	SPDK_CU_ASSERT_FATAL(readsize % bs_blksz == 0);
8321 
8322 	memset(buf, 0, readsize);
8323 	iov.iov_base = buf;
8324 	iov.iov_len = readsize;
8325 	for (blob_block = start_blk; blob_block < num_blocks; blob_block += blocks_per_read) {
8326 		if (strcmp(how, "read") == 0) {
8327 			spdk_blob_io_read(blob, ch, buf, blob_block, blocks_per_read,
8328 					  bs_op_complete, NULL);
8329 		} else if (strcmp(how, "readv") == 0) {
8330 			spdk_blob_io_readv(blob, ch, &iov, 1, blob_block, blocks_per_read,
8331 					   bs_op_complete, NULL);
8332 		} else if (strcmp(how, "readv_ext") == 0) {
8333 			/*
8334 			 * This is currently pointless. NULL ext_opts leads to dev->readv(), not
8335 			 * dev->readv_ext().
8336 			 */
8337 			spdk_blob_io_readv_ext(blob, ch, &iov, 1, blob_block, blocks_per_read,
8338 					       bs_op_complete, NULL, NULL);
8339 		} else {
8340 			abort();
8341 		}
8342 		poll_threads();
8343 		CU_ASSERT(g_bserrno == 0);
8344 		if (g_bserrno != 0) {
8345 			return false;
8346 		}
8347 		block_ok = ut_esnap_content_is_correct(buf, blocks_per_read * bs_blksz, blob->id,
8348 						       blob_block * bs_blksz, esnap_blksz);
8349 		CU_ASSERT(block_ok);
8350 		if (!block_ok) {
8351 			return false;
8352 		}
8353 	}
8354 
8355 	return true;
8356 }
8357 
/*
 * Create a fresh blobstore whose device uses bs_blksz-byte blocks, back an esnap clone with a
 * device using esnap_blksz-byte blocks, and verify reads/writes across the block-size mix:
 * whole-device and single-block reads must return the esnap contents, and after writing each
 * blob block in turn the written block, its predecessor, and its (still esnap-backed)
 * successor must all read back correctly.
 */
static void
blob_esnap_io_size(uint32_t bs_blksz, uint32_t esnap_blksz)
{
	struct spdk_bs_dev	*dev;
	struct spdk_blob_store	*bs;
	struct spdk_bs_opts	bsopts;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob	*blob;
	const uint32_t		cluster_sz = 16 * 1024;
	const uint64_t		esnap_num_clusters = 4;
	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
	const uint64_t		esnap_num_blocks = esnap_sz / esnap_blksz;
	const uint64_t		blob_num_blocks = esnap_sz / bs_blksz;
	uint32_t		block;
	struct spdk_io_channel	*bs_ch;

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = cluster_sz;
	bsopts.esnap_bs_dev_create = ut_esnap_create;

	/* Create device with desired block size */
	dev = init_dev();
	dev->blocklen = bs_blksz;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
	bs = g_bs;

	bs_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(bs_ch != NULL);

	/* Create and open the esnap clone  */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);

	/* Verify that large reads return the content of the esnap device */
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "read"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv_ext"));
	/* Verify that small reads return the content of the esnap device */
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "read"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv_ext"));

	/* Write one blob block at a time; verify that the surrounding blocks are OK */
	for (block = 0; block < blob_num_blocks; block++) {
		char		buf[bs_blksz];
		union ut_word	word;

		/* Use a sentinel blob_id so written blocks are distinguishable from
		 * the esnap-generated pattern (which is keyed to blob->id). */
		word.f.blob_id = 0xfedcba90;
		word.f.lba = block;
		ut_memset8(buf, word.num, bs_blksz);

		spdk_blob_io_write(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}

		/* Read and verify the block before the current block */
		if (block != 0) {
			spdk_blob_io_read(blob, bs_ch, buf, block - 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
							      (block - 1) * bs_blksz, bs_blksz));
		}

		/* Read and verify the current block */
		spdk_blob_io_read(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
						      block * bs_blksz, bs_blksz));

		/* Check the block that follows */
		if (block + 1 < blob_num_blocks) {
			g_bserrno = 0xbad;
			spdk_blob_io_read(blob, bs_ch, buf, block + 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			/* The next block is still unwritten, so it must match the esnap
			 * pattern for this blob. */
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, blob->id,
							      (block + 1) * bs_blksz,
							      esnap_blksz));
		}
	}

	/* Clean up */
	spdk_bs_free_io_channel(bs_ch);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(g_bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
8478 
/* Matching blobstore and esnap block sizes (4096/4096). */
static void
blob_esnap_io_4096_4096(void)
{
	blob_esnap_io_size(4096, 4096);
}
8484 
/* Matching blobstore and esnap block sizes (512/512). */
static void
blob_esnap_io_512_512(void)
{
	blob_esnap_io_size(512, 512);
}
8490 
/* Blobstore block size (4096) larger than the esnap block size (512). */
static void
blob_esnap_io_4096_512(void)
{
	blob_esnap_io_size(4096, 512);
}
8496 
/*
 * Esnap block size (4096) larger than the blobstore block size (512): creating the esnap clone
 * succeeds, but opening it must fail with -EINVAL because the esnap device cannot be addressed
 * in blobstore io units.
 */
static void
blob_esnap_io_512_4096(void)
{
	struct spdk_bs_dev	*dev;
	struct spdk_blob_store	*bs;
	struct spdk_bs_opts	bs_opts;
	struct spdk_blob_opts	blob_opts;
	struct ut_esnap_opts	esnap_opts;
	uint64_t		cluster_sz = 16 * 1024;
	uint32_t		bs_blksz = 512;
	uint32_t		esnap_blksz = 4096;
	uint64_t		esnap_num_blocks = 64;
	spdk_blob_id		blobid;

	/* Create device with desired block size */
	dev = init_dev();
	dev->blocklen = bs_blksz;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.cluster_sz = cluster_sz;
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
	bs = g_bs;

	/* Try to create and open the esnap clone. Create should succeed, open should fail. */
	ut_spdk_blob_opts_init(&blob_opts);
	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	blob_opts.esnap_id = &esnap_opts;
	blob_opts.esnap_id_len = sizeof(esnap_opts);
	blob_opts.num_clusters = esnap_num_blocks * esnap_blksz / bs_blksz;
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blob == NULL);

	/* Clean up */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
8550 
/*
 * Verify the per-thread lifecycle of esnap bs_dev channels: channels are created lazily on the
 * first read from each thread, torn down when that thread's blobstore channel is freed, and
 * fully destroyed (along with the bs_dev itself) when the blob is closed.
 */
static void
blob_esnap_thread_add_remove(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	ut_esnap_opts;
	struct spdk_blob	*blob;
	struct ut_esnap_dev	*ut_dev;
	spdk_blob_id		blobid;
	uint64_t		start_thread = g_ut_thread_id;
	bool			destroyed = false;
	struct spdk_io_channel	*ch0, *ch1;
	struct ut_esnap_channel	*ut_ch0, *ut_ch1;
	const uint32_t		blocklen = bs->io_unit_size;
	char			buf[blocklen * 4];

	SPDK_CU_ASSERT_FATAL(g_ut_num_threads > 1);
	set_thread(0);

	/* Create the esnap clone */
	ut_esnap_opts_init(blocklen, 2048, "add_remove_1", &destroyed, &ut_esnap_opts);
	ut_spdk_blob_opts_init(&opts);
	opts.esnap_id = &ut_esnap_opts;
	opts.esnap_id_len = sizeof(ut_esnap_opts);
	opts.num_clusters = 10;
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Open the blob. No channels should be allocated yet. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	ut_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(ut_dev != NULL);
	CU_ASSERT(ut_dev->num_channels == 0);

	/* Create a channel on thread 0. It is lazily created on the first read. */
	ch0 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch0 != NULL);
	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
	CU_ASSERT(ut_ch0 == NULL);
	CU_ASSERT(ut_dev->num_channels == 0);
	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(ut_dev->num_channels == 1);
	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
	CU_ASSERT(ut_ch0 != NULL);
	CU_ASSERT(ut_ch0->blocks_read == 1);

	/* Create a channel on thread 1 and verify its lazy creation too. */
	set_thread(1);
	ch1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch1 != NULL);
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 == NULL);
	CU_ASSERT(ut_dev->num_channels == 1);
	spdk_blob_io_read(blob, ch1, buf, 0, 4, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(ut_dev->num_channels == 2);
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 != NULL);
	CU_ASSERT(ut_ch1->blocks_read == 4);

	/* Close the channel on thread 0 and verify the bs_dev channel is also gone. */
	set_thread(0);
	spdk_bs_free_io_channel(ch0);
	poll_threads();
	CU_ASSERT(ut_dev->num_channels == 1);

	/* Close the blob. There is no outstanding IO so it should close right away. */
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Closing the blob destroys the backing ut_esnap device (destroyed flag set). */
	CU_ASSERT(destroyed);

	/* The esnap channel for the blob should be gone now too. */
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 == NULL);

	/* Clean up */
	set_thread(1);
	spdk_bs_free_io_channel(ch1);
	set_thread(start_thread);
}
8643 
/* Completion callback for blob_freeze_io(): counts successful freeze completions. */
static void
freeze_done(void *cb_arg, int bserrno)
{
	uint32_t *counter = cb_arg;

	CU_ASSERT(bserrno == 0);
	*counter += 1;
}
8652 
/* Completion callback for blob_unfreeze_io(): counts successful unfreeze completions. */
static void
unfreeze_done(void *cb_arg, int bserrno)
{
	uint32_t *counter = cb_arg;

	CU_ASSERT(bserrno == 0);
	*counter += 1;
}
8661 
/*
 * Verify that blob freeze/unfreeze operations nest correctly: frozen_refcnt tracks the nesting
 * depth, and each freeze/unfreeze completion fires only after poll_threads() has driven the
 * underlying for_each_channel() across both test threads.
 */
static void
blob_nested_freezes(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel[2];
	struct spdk_blob_opts opts;
	uint32_t freeze_cnt, unfreeze_cnt;
	int i;

	/* Allocate a blobstore channel on each of the two test threads. */
	for (i = 0; i < 2; i++) {
		set_thread(i);
		channel[i] = spdk_bs_alloc_io_channel(bs);
		SPDK_CU_ASSERT_FATAL(channel[i] != NULL);
	}

	set_thread(0);

	ut_spdk_blob_opts_init(&opts);
	blob = ut_blob_create_and_open(bs, &opts);

	/* First just test a single freeze/unfreeze. */
	freeze_cnt = 0;
	unfreeze_cnt = 0;
	CU_ASSERT(blob->frozen_refcnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	/* Refcount is bumped immediately, but the completion needs a poll. */
	CU_ASSERT(freeze_cnt == 0);
	poll_threads();
	CU_ASSERT(freeze_cnt == 1);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 0);
	CU_ASSERT(unfreeze_cnt == 0);
	poll_threads();
	CU_ASSERT(unfreeze_cnt == 1);

	/* Now nest multiple freeze/unfreeze operations.  We should
	 * expect a callback for each operation, but only after
	 * the threads have been polled to ensure a for_each_channel()
	 * was executed.
	 */
	freeze_cnt = 0;
	unfreeze_cnt = 0;
	CU_ASSERT(blob->frozen_refcnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(freeze_cnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 2);
	CU_ASSERT(freeze_cnt == 0);
	poll_threads();
	CU_ASSERT(freeze_cnt == 2);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(unfreeze_cnt == 0);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 0);
	CU_ASSERT(unfreeze_cnt == 0);
	poll_threads();
	CU_ASSERT(unfreeze_cnt == 2);

	for (i = 0; i < 2; i++) {
		set_thread(i);
		spdk_bs_free_io_channel(channel[i]);
	}
	set_thread(0);
	ut_blob_close_and_delete(bs, blob);

	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
8734 
8735 static void
8736 blob_ext_md_pages(void)
8737 {
8738 	struct spdk_blob_store *bs;
8739 	struct spdk_bs_dev *dev;
8740 	struct spdk_blob *blob;
8741 	struct spdk_blob_opts opts;
8742 	struct spdk_bs_opts bs_opts;
8743 	uint64_t free_clusters;
8744 
8745 	dev = init_dev();
8746 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8747 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
8748 	/* Issue #2932 was a bug in how we use bs_allocate_cluster() during resize.
8749 	 * It requires num_md_pages that is much smaller than the number of clusters.
8750 	 * Make sure we can create a blob that uses all of the free clusters.
8751 	 */
8752 	bs_opts.cluster_sz = 65536;
8753 	bs_opts.num_md_pages = 16;
8754 
8755 	/* Initialize a new blob store */
8756 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
8757 	poll_threads();
8758 	CU_ASSERT(g_bserrno == 0);
8759 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8760 	bs = g_bs;
8761 
8762 	free_clusters = spdk_bs_free_cluster_count(bs);
8763 
8764 	ut_spdk_blob_opts_init(&opts);
8765 	opts.num_clusters = free_clusters;
8766 
8767 	blob = ut_blob_create_and_open(bs, &opts);
8768 	spdk_blob_close(blob, blob_op_complete, NULL);
8769 	CU_ASSERT(g_bserrno == 0);
8770 
8771 	spdk_bs_unload(bs, bs_op_complete, NULL);
8772 	poll_threads();
8773 	CU_ASSERT(g_bserrno == 0);
8774 	g_bs = NULL;
8775 }
8776 
8777 static void
8778 blob_esnap_clone_snapshot(void)
8779 {
8780 	/*
8781 	 * When a snapshot is created, the blob that is being snapped becomes
8782 	 * the leaf node (a clone of the snapshot) and the newly created
8783 	 * snapshot sits between the snapped blob and the external snapshot.
8784 	 *
8785 	 * Before creating snap1
8786 	 *
8787 	 *   ,--------.     ,----------.
8788 	 *   |  blob  |     |  vbdev   |
8789 	 *   | blob1  |<----| nvme1n42 |
8790 	 *   |  (rw)  |     |   (ro)   |
8791 	 *   `--------'     `----------'
8792 	 *       Figure 1
8793 	 *
8794 	 * After creating snap1
8795 	 *
8796 	 *   ,--------.     ,--------.     ,----------.
8797 	 *   |  blob  |     |  blob  |     |  vbdev   |
8798 	 *   | blob1  |<----| snap1  |<----| nvme1n42 |
8799 	 *   |  (rw)  |     |  (ro)  |     |   (ro)   |
8800 	 *   `--------'     `--------'     `----------'
8801 	 *       Figure 2
8802 	 *
8803 	 * Starting from Figure 2, if snap1 is removed, the chain reverts to
8804 	 * what it looks like in Figure 1.
8805 	 *
8806 	 * Starting from Figure 2, if blob1 is removed, the chain becomes:
8807 	 *
8808 	 *   ,--------.     ,----------.
8809 	 *   |  blob  |     |  vbdev   |
8810 	 *   | snap1  |<----| nvme1n42 |
8811 	 *   |  (ro)  |     |   (ro)   |
8812 	 *   `--------'     `----------'
8813 	 *       Figure 3
8814 	 *
8815 	 * In each case, the blob pointed to by the nvme vbdev is considered
8816 	 * the "esnap clone".  The esnap clone must have:
8817 	 *
8818 	 *   - XATTR_INTERNAL for BLOB_EXTERNAL_SNAPSHOT_ID (e.g. name or UUID)
8819 	 *   - blob->invalid_flags must contain SPDK_BLOB_EXTERNAL_SNAPSHOT
8820 	 *   - blob->parent_id must be SPDK_BLOBID_EXTERNAL_SNAPSHOT.
8821 	 *
8822 	 * No other blob that descends from the esnap clone may have any of
8823 	 * those set.
8824 	 */
8825 	struct spdk_blob_store	*bs = g_bs;
8826 	const uint32_t		blocklen = bs->io_unit_size;
8827 	struct spdk_blob_opts	opts;
8828 	struct ut_esnap_opts	esnap_opts;
8829 	struct spdk_blob	*blob, *snap_blob;
8830 	spdk_blob_id		blobid, snap_blobid;
8831 	bool			destroyed = false;
8832 
8833 	/* Create the esnap clone */
8834 	ut_esnap_opts_init(blocklen, 2048, __func__, &destroyed, &esnap_opts);
8835 	ut_spdk_blob_opts_init(&opts);
8836 	opts.esnap_id = &esnap_opts;
8837 	opts.esnap_id_len = sizeof(esnap_opts);
8838 	opts.num_clusters = 10;
8839 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
8840 	poll_threads();
8841 	CU_ASSERT(g_bserrno == 0);
8842 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8843 	blobid = g_blobid;
8844 
8845 	/* Open the blob. */
8846 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8847 	poll_threads();
8848 	CU_ASSERT(g_bserrno == 0);
8849 	CU_ASSERT(g_blob != NULL);
8850 	blob = g_blob;
8851 	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));
8852 
8853 	/*
8854 	 * Create a snapshot of the blob. The snapshot becomes the esnap clone.
8855 	 */
8856 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
8857 	poll_threads();
8858 	CU_ASSERT(g_bserrno == 0);
8859 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8860 	snap_blobid = g_blobid;
8861 
8862 	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
8863 	poll_threads();
8864 	CU_ASSERT(g_bserrno == 0);
8865 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
8866 	snap_blob = g_blob;
8867 
8868 	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
8869 	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));
8870 
8871 	/*
8872 	 * Delete the snapshot.  The original blob becomes the esnap clone.
8873 	 */
8874 	ut_blob_close_and_delete(bs, snap_blob);
8875 	snap_blob = NULL;
8876 	snap_blobid = SPDK_BLOBID_INVALID;
8877 	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));
8878 
8879 	/*
8880 	 * Create the snapshot again, then delete the original blob.  The
8881 	 * snapshot should survive as the esnap clone.
8882 	 */
8883 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
8884 	poll_threads();
8885 	CU_ASSERT(g_bserrno == 0);
8886 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8887 	snap_blobid = g_blobid;
8888 
8889 	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
8890 	poll_threads();
8891 	CU_ASSERT(g_bserrno == 0);
8892 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
8893 	snap_blob = g_blob;
8894 
8895 	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
8896 	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));
8897 
8898 	ut_blob_close_and_delete(bs, blob);
8899 	blob = NULL;
8900 	blobid = SPDK_BLOBID_INVALID;
8901 	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));
8902 
8903 	/*
8904 	 * Clone the snapshot.  The snapshot continues to be the esnap clone.
8905 	 */
8906 	spdk_bs_create_clone(bs, snap_blobid, NULL, blob_op_with_id_complete, NULL);
8907 	poll_threads();
8908 	CU_ASSERT(g_bserrno == 0);
8909 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8910 	blobid = g_blobid;
8911 
8912 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8913 	poll_threads();
8914 	CU_ASSERT(g_bserrno == 0);
8915 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
8916 	blob = g_blob;
8917 
8918 	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
8919 	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));
8920 
8921 	/*
8922 	 * Delete the snapshot. The clone becomes the esnap clone.
8923 	 */
8924 	ut_blob_close_and_delete(bs, snap_blob);
8925 	snap_blob = NULL;
8926 	snap_blobid = SPDK_BLOBID_INVALID;
8927 	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));
8928 
8929 	/*
8930 	 * Clean up
8931 	 */
8932 	ut_blob_close_and_delete(bs, blob);
8933 }
8934 
/*
 * Common helper for the esnap clone inflate/decouple tests.
 *
 * Creates a thin blob backed by a UT external snapshot, then either inflates
 * it (inflate == true, via spdk_bs_inflate_blob) or decouples it from its
 * parent (inflate == false, via spdk_bs_blob_decouple_parent).  Either way,
 * the blob must no longer be an esnap clone afterward and must still read
 * back the external snapshot's content.
 *
 * Returns the number of CUnit assertion failures added during this call so
 * that callers could distinguish which variant failed.
 */
static uint64_t
_blob_esnap_clone_hydrate(bool inflate)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob	*blob;
	spdk_blob_id		blobid;
	struct spdk_io_channel *channel;
	bool			destroyed = false;
	const uint32_t		blocklen = spdk_bs_get_io_unit_size(bs);
	const uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
	const uint64_t		esnap_num_clusters = 4;
	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
	const uint64_t		esnap_num_blocks = esnap_sz / blocklen;
	/* Snapshot the failure count so the delta can be returned at the end. */
	uint64_t		num_failures = CU_get_number_of_failures();

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create the esnap clone */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(blocklen, esnap_num_blocks, __func__, &destroyed, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Open the esnap clone */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Inflate or decouple the blob, then verify that it is no longer an
	 * esnap clone and still has the right content.
	 */
	if (inflate) {
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	} else {
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
	}
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	CU_ASSERT(blob_esnap_verify_contents(blob, channel, 0, esnap_sz, esnap_sz, "read"));
	ut_blob_close_and_delete(bs, blob);

	/*
	 * Clean up
	 */
	spdk_bs_free_io_channel(channel);
	poll_threads();

	/* Return number of new failures */
	return CU_get_number_of_failures() - num_failures;
}
8999 
9000 static void
9001 blob_esnap_clone_inflate(void)
9002 {
9003 	_blob_esnap_clone_hydrate(true);
9004 }
9005 
9006 static void
9007 blob_esnap_clone_decouple(void)
9008 {
9009 	_blob_esnap_clone_hydrate(false);
9010 }
9011 
/*
 * Verify that the external snapshot device backing an esnap clone can be
 * replaced ("hot plugged") with spdk_blob_set_esnap_bs_dev():
 *   - first while the blob has no I/O channels,
 *   - then while I/O channels exist on two threads.
 * Each replacement must destroy the old back_bs_dev and install the new one.
 */
static void
blob_esnap_hotplug(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct ut_esnap_opts	esnap1_opts, esnap2_opts;
	struct spdk_blob_opts	opts;
	struct spdk_blob	*blob;
	struct spdk_bs_dev	*bs_dev;
	struct ut_esnap_dev	*esnap_dev;
	uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
	uint32_t		block_sz = spdk_bs_get_io_unit_size(bs);
	const uint32_t		esnap_num_clusters = 4;
	uint64_t		esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
	/* Set to true by the UT esnap dev when the corresponding dev is destroyed. */
	bool			destroyed1 = false, destroyed2 = false;
	uint64_t		start_thread = g_ut_thread_id;
	struct spdk_io_channel	*ch0, *ch1;
	char			buf[block_sz];

	/* Create and open an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap1", &destroyed1, &esnap1_opts);
	opts.esnap_id = &esnap1_opts;
	opts.esnap_id_len = sizeof(esnap1_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(blob != NULL);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob));
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap1") == 0);

	/* Replace the external snapshot: esnap1 is destroyed, esnap2 takes over. */
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap2", &destroyed2, &esnap2_opts);
	bs_dev = ut_esnap_dev_alloc(&esnap2_opts);
	CU_ASSERT(!destroyed1);
	CU_ASSERT(!destroyed2);
	g_bserrno = 0xbad;
	spdk_blob_set_esnap_bs_dev(blob, bs_dev, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(destroyed1);
	CU_ASSERT(!destroyed2);
	SPDK_CU_ASSERT_FATAL(bs_dev == blob->back_bs_dev);
	SPDK_CU_ASSERT_FATAL(bs_dev == spdk_blob_get_esnap_bs_dev(blob));
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap2") == 0);

	/* Create a couple channels, issuing a read on each so the back_bs_dev
	 * channels are actually created on both threads.
	 */
	set_thread(0);
	ch0 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch0 != NULL);
	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
	set_thread(1);
	ch1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch1 != NULL);
	spdk_blob_io_read(blob, ch1, buf, 0, 1, bs_op_complete, NULL);
	set_thread(start_thread);
	poll_threads();
	CU_ASSERT(esnap_dev->num_channels == 2);

	/* Replace the external snapshot again, now with channels open. */
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap1a", &destroyed1, &esnap1_opts);
	bs_dev = ut_esnap_dev_alloc(&esnap1_opts);
	destroyed1 = destroyed2 = false;
	g_bserrno = 0xbad;
	spdk_blob_set_esnap_bs_dev(blob, bs_dev, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(!destroyed1);
	CU_ASSERT(destroyed2);
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap1a") == 0);

	/* Clean up */
	set_thread(0);
	spdk_bs_free_io_channel(ch0);
	set_thread(1);
	spdk_bs_free_io_channel(ch1);
	set_thread(start_thread);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
9097 
/* State shared with the _blob_is_degraded() stub below: the canned answer it
 * returns and a counter of how many times the blobstore asked.
 */
static bool g_blob_is_degraded;
static int g_blob_is_degraded_called;

/* Stub spdk_bs_dev::is_degraded callback: records the call and returns the
 * canned g_blob_is_degraded value.
 */
static bool
_blob_is_degraded(struct spdk_bs_dev *dev)
{
	g_blob_is_degraded_called++;
	return g_blob_is_degraded;
}
9107 
/*
 * Verify spdk_blob_is_degraded() across combinations of:
 *   - blobstore device with/without an is_degraded callback,
 *   - blob with no back_bs_dev, a back_bs_dev without is_degraded,
 *     and a back_bs_dev whose is_degraded returns true/false.
 * g_blob_is_degraded_called checks exactly how many callbacks were consulted.
 */
static void
blob_is_degraded(void)
{
	struct spdk_bs_dev bs_is_degraded_null = { 0 };
	struct spdk_bs_dev bs_is_degraded = { .is_degraded = _blob_is_degraded };

	/* No back_bs_dev, no bs->dev->is_degraded */
	g_blob_is_degraded_called = 0;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 0);

	/* No back_bs_dev, blobstore device degraded */
	g_bs->dev->is_degraded = _blob_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = true;
	CU_ASSERT(spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* No back_bs_dev, blobstore device not degraded */
	g_bs->dev->is_degraded = _blob_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* back_bs_dev does not define is_degraded, no bs->dev->is_degraded */
	g_bs->dev->is_degraded = NULL;
	g_blob->back_bs_dev = &bs_is_degraded_null;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 0);

	/* back_bs_dev is not degraded, no bs->dev->is_degraded */
	g_bs->dev->is_degraded = NULL;
	g_blob->back_bs_dev = &bs_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* back_bs_dev is degraded, no bs->dev->is_degraded */
	g_bs->dev->is_degraded = NULL;
	g_blob->back_bs_dev = &bs_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = true;
	CU_ASSERT(spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* back_bs_dev is not degraded, blobstore device is not degraded;
	 * both callbacks must be consulted (2 calls).
	 */
	g_bs->dev->is_degraded = _blob_is_degraded;
	g_blob->back_bs_dev = &bs_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 2);

	/* Restore: the stack-allocated devs must not outlive this function. */
	g_blob->back_bs_dev = NULL;
}
9167 
9168 /* Resize a blob which is a clone created from snapshot. Verify read/writes to
9169  * expanded clone blob. Then inflate the clone blob. */
9170 static void
9171 blob_clone_resize(void)
9172 {
9173 	struct spdk_blob_store *bs = g_bs;
9174 	struct spdk_blob_opts opts;
9175 	struct spdk_blob *blob, *clone, *snap_blob, *snap_blob_rsz;
9176 	spdk_blob_id blobid, cloneid, snapid1, snapid2;
9177 	uint64_t pages_per_cluster;
9178 	uint8_t payload_read[bs->dev->blocklen];
9179 	uint8_t payload_write[bs->dev->blocklen];
9180 	struct spdk_io_channel *channel;
9181 	uint64_t free_clusters;
9182 
9183 	channel = spdk_bs_alloc_io_channel(bs);
9184 	SPDK_CU_ASSERT_FATAL(channel != NULL);
9185 
9186 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
9187 
9188 	/* Create blob with 10 clusters */
9189 	ut_spdk_blob_opts_init(&opts);
9190 	opts.num_clusters = 10;
9191 
9192 	blob = ut_blob_create_and_open(bs, &opts);
9193 	blobid = spdk_blob_get_id(blob);
9194 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
9195 
9196 	/* Create snapshot */
9197 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
9198 	poll_threads();
9199 	CU_ASSERT(g_bserrno == 0);
9200 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9201 	snapid1 = g_blobid;
9202 
9203 	spdk_bs_create_clone(bs, snapid1, NULL, blob_op_with_id_complete, NULL);
9204 	poll_threads();
9205 	CU_ASSERT(g_bserrno == 0);
9206 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9207 	cloneid = g_blobid;
9208 
9209 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
9210 	poll_threads();
9211 	CU_ASSERT(g_bserrno == 0);
9212 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9213 	clone = g_blob;
9214 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
9215 
9216 	g_bserrno = -1;
9217 	spdk_blob_resize(clone, 20, blob_op_complete, NULL);
9218 	poll_threads();
9219 	CU_ASSERT(g_bserrno == 0);
9220 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 20);
9221 
9222 	/* Create another snapshot after resizing the clone */
9223 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
9224 	poll_threads();
9225 	CU_ASSERT(g_bserrno == 0);
9226 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9227 	snapid2 = g_blobid;
9228 
9229 	/* Open the snapshot blobs */
9230 	spdk_bs_open_blob(bs, snapid1, blob_op_with_handle_complete, NULL);
9231 	CU_ASSERT(g_bserrno == 0);
9232 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9233 	snap_blob = g_blob;
9234 	CU_ASSERT(snap_blob->data_ro == true);
9235 	CU_ASSERT(snap_blob->md_ro == true);
9236 	CU_ASSERT(spdk_blob_get_num_clusters(snap_blob) == 10);
9237 
9238 	spdk_bs_open_blob(bs, snapid2, blob_op_with_handle_complete, NULL);
9239 	CU_ASSERT(g_bserrno == 0);
9240 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9241 	snap_blob_rsz = g_blob;
9242 	CU_ASSERT(snap_blob_rsz->data_ro == true);
9243 	CU_ASSERT(snap_blob_rsz->md_ro == true);
9244 	CU_ASSERT(spdk_blob_get_num_clusters(snap_blob_rsz) == 20);
9245 
9246 	/* Confirm that clone is backed by snap_blob_rsz, and snap_blob_rsz is backed by snap_blob */
9247 	SPDK_CU_ASSERT_FATAL(snap_blob->back_bs_dev == NULL);
9248 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
9249 	SPDK_CU_ASSERT_FATAL(snap_blob_rsz->back_bs_dev != NULL);
9250 
9251 	/* Write and read from pre-resize ranges */
9252 	g_bserrno = -1;
9253 	memset(payload_write, 0xE5, sizeof(payload_write));
9254 	spdk_blob_io_write(clone, channel, payload_write, 5 * pages_per_cluster, 1, blob_op_complete, NULL);
9255 	poll_threads();
9256 	CU_ASSERT(g_bserrno == 0);
9257 
9258 	g_bserrno = -1;
9259 	memset(payload_read, 0x00, sizeof(payload_read));
9260 	spdk_blob_io_read(clone, channel, payload_read, 5 * pages_per_cluster, 1, blob_op_complete, NULL);
9261 	poll_threads();
9262 	CU_ASSERT(g_bserrno == 0);
9263 	CU_ASSERT(memcmp(payload_write, payload_read, 4096) == 0);
9264 
9265 	/* Write and read from post-resize ranges */
9266 	g_bserrno = -1;
9267 	memset(payload_write, 0xE5, sizeof(payload_write));
9268 	spdk_blob_io_write(clone, channel, payload_write, 15 * pages_per_cluster, 1, blob_op_complete,
9269 			   NULL);
9270 	poll_threads();
9271 	CU_ASSERT(g_bserrno == 0);
9272 
9273 	g_bserrno = -1;
9274 	memset(payload_read, 0x00, sizeof(payload_read));
9275 	spdk_blob_io_read(clone, channel, payload_read, 15 * pages_per_cluster, 1, blob_op_complete, NULL);
9276 	poll_threads();
9277 	CU_ASSERT(g_bserrno == 0);
9278 	CU_ASSERT(memcmp(payload_write, payload_read, bs->dev->blocklen) == 0);
9279 
9280 	/* Now do full blob inflation of the resized blob/clone. */
9281 	free_clusters = spdk_bs_free_cluster_count(bs);
9282 	spdk_bs_inflate_blob(bs, channel, cloneid, blob_op_complete, NULL);
9283 	poll_threads();
9284 	CU_ASSERT(g_bserrno == 0);
9285 	/* We wrote to 2 clusters earlier, all remaining 18 clusters in
9286 	 * blob should get allocated after inflation */
9287 	CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 18);
9288 
9289 	spdk_blob_close(clone, blob_op_complete, NULL);
9290 	poll_threads();
9291 	CU_ASSERT(g_bserrno == 0);
9292 
9293 	spdk_blob_close(snap_blob, blob_op_complete, NULL);
9294 	poll_threads();
9295 	CU_ASSERT(g_bserrno == 0);
9296 
9297 	spdk_blob_close(snap_blob_rsz, blob_op_complete, NULL);
9298 	poll_threads();
9299 	CU_ASSERT(g_bserrno == 0);
9300 
9301 	ut_blob_close_and_delete(bs, blob);
9302 
9303 	spdk_bs_free_io_channel(channel);
9304 }
9305 
9306 
/*
 * Build a fresh blobstore, create an esnap clone of 4 clusters, resize it to
 * 8 clusters, then write each block in turn while verifying that the
 * previous, current, and next blocks all hold the expected content.  This
 * exercises copy-on-write across the resized (beyond-esnap) region.
 */
static void
blob_esnap_clone_resize(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_blob_store *bs;
	struct spdk_bs_opts bsopts;
	struct spdk_blob_opts opts;
	struct ut_esnap_opts esnap_opts;
	struct spdk_blob *blob;
	uint32_t block, esnap_blksz = 512, bs_blksz = 512;
	const uint32_t cluster_sz = 16 * 1024;
	const uint64_t esnap_num_clusters = 4;
	const uint32_t esnap_sz = cluster_sz * esnap_num_clusters;
	const uint64_t esnap_num_blocks = esnap_sz / esnap_blksz;
	uint64_t blob_num_blocks = esnap_sz / bs_blksz;
	struct spdk_io_channel *bs_ch;

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = cluster_sz;
	bsopts.esnap_bs_dev_create = ut_esnap_create;
	/* Create device with desired block size */
	dev = init_dev();
	dev->blocklen = bs_blksz;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
	bs = g_bs;

	bs_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(bs_ch != NULL);

	/* Create and open the esnap clone  */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);

	/* Double the blob size beyond the external snapshot's extent. */
	g_bserrno = -1;
	spdk_blob_resize(blob, esnap_num_clusters * 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == esnap_num_clusters * 2);

	/* Write one blob block at a time; verify that the surrounding blocks are OK */
	blob_num_blocks = (spdk_blob_get_num_clusters(blob) * cluster_sz) / bs_blksz;
	for (block = 0; block < blob_num_blocks; block++) {
		char buf[bs_blksz];
		union ut_word word;
		word.f.blob_id = 0xfedcba90;
		word.f.lba = block;
		ut_memset8(buf, word.num, bs_blksz);
		spdk_blob_io_write(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		/* Read and verify the block before the current block */
		if (block != 0) {
			spdk_blob_io_read(blob, bs_ch, buf, block - 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
							      (block - 1) * bs_blksz, bs_blksz));
		}
		/* Read and verify the current block */
		spdk_blob_io_read(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
						      block * bs_blksz, bs_blksz));
		/* Check the block that follows (still unwritten by this loop).
		 * NOTE(review): this passes blob->id where the other checks pass
		 * word.f.blob_id — presumably because unwritten blocks still show
		 * the esnap-generated content keyed by the dev's id; confirm
		 * against ut_esnap_content_is_correct() in esnap_dev.c.
		 */
		if (block + 1 < blob_num_blocks) {
			g_bserrno = 0xbad;
			spdk_blob_io_read(blob, bs_ch, buf, block + 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, blob->id,
							      (block + 1) * bs_blksz,
							      esnap_blksz));
		}
	}
	/* Clean up */
	spdk_bs_free_io_channel(bs_ch);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(g_bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
9417 
/* spdk_bs_dev I/O completion callback used with the external UT dev:
 * just capture the status into the shared g_bserrno.
 */
static void
bs_dev_io_complete_cb(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}
9423 
9424 static void
9425 blob_shallow_copy(void)
9426 {
9427 	struct spdk_blob_store *bs = g_bs;
9428 	struct spdk_blob_opts blob_opts;
9429 	struct spdk_blob *blob;
9430 	spdk_blob_id blobid;
9431 	uint64_t num_clusters = 4;
9432 	struct spdk_bs_dev *ext_dev;
9433 	struct spdk_bs_dev_cb_args ext_args;
9434 	struct spdk_io_channel *bdev_ch, *blob_ch;
9435 	uint8_t buf1[DEV_BUFFER_BLOCKLEN];
9436 	uint8_t buf2[DEV_BUFFER_BLOCKLEN];
9437 	uint64_t io_units_per_cluster;
9438 	uint64_t offset;
9439 	int rc;
9440 
9441 	blob_ch = spdk_bs_alloc_io_channel(bs);
9442 	SPDK_CU_ASSERT_FATAL(blob_ch != NULL);
9443 
9444 	/* Set blob dimension and as thin provisioned */
9445 	ut_spdk_blob_opts_init(&blob_opts);
9446 	blob_opts.thin_provision = true;
9447 	blob_opts.num_clusters = num_clusters;
9448 
9449 	/* Create a blob */
9450 	blob = ut_blob_create_and_open(bs, &blob_opts);
9451 	SPDK_CU_ASSERT_FATAL(blob != NULL);
9452 	blobid = spdk_blob_get_id(blob);
9453 	io_units_per_cluster = bs_io_units_per_cluster(blob);
9454 
9455 	/* Write on cluster 2 and 4 of blob */
9456 	for (offset = io_units_per_cluster; offset < 2 * io_units_per_cluster; offset++) {
9457 		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
9458 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
9459 		poll_threads();
9460 		CU_ASSERT(g_bserrno == 0);
9461 	}
9462 	for (offset = 3 * io_units_per_cluster; offset < 4 * io_units_per_cluster; offset++) {
9463 		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
9464 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
9465 		poll_threads();
9466 		CU_ASSERT(g_bserrno == 0);
9467 	}
9468 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2);
9469 
9470 	/* Make a snapshot over blob */
9471 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
9472 	poll_threads();
9473 	CU_ASSERT(g_bserrno == 0);
9474 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
9475 
9476 	/* Write on cluster 1 and 3 of blob */
9477 	for (offset = 0; offset < io_units_per_cluster; offset++) {
9478 		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
9479 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
9480 		poll_threads();
9481 		CU_ASSERT(g_bserrno == 0);
9482 	}
9483 	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
9484 		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
9485 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
9486 		poll_threads();
9487 		CU_ASSERT(g_bserrno == 0);
9488 	}
9489 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2);
9490 
9491 	/* Shallow copy with a not read only blob */
9492 	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN);
9493 	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
9494 				       blob_shallow_copy_status_cb, NULL,
9495 				       blob_op_complete, NULL);
9496 	CU_ASSERT(rc == 0);
9497 	poll_threads();
9498 	CU_ASSERT(g_bserrno == -EPERM);
9499 	ext_dev->destroy(ext_dev);
9500 
9501 	/* Set blob read only */
9502 	spdk_blob_set_read_only(blob);
9503 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
9504 	poll_threads();
9505 	CU_ASSERT(g_bserrno == 0);
9506 
9507 	/* Shallow copy over a spdk_bs_dev with incorrect size */
9508 	ext_dev = init_ext_dev(1, DEV_BUFFER_BLOCKLEN);
9509 	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
9510 				       blob_shallow_copy_status_cb, NULL,
9511 				       blob_op_complete, NULL);
9512 	CU_ASSERT(rc == 0);
9513 	poll_threads();
9514 	CU_ASSERT(g_bserrno == -EINVAL);
9515 	ext_dev->destroy(ext_dev);
9516 
9517 	/* Shallow copy over a spdk_bs_dev with incorrect block len */
9518 	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN * 2);
9519 	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
9520 				       blob_shallow_copy_status_cb, NULL,
9521 				       blob_op_complete, NULL);
9522 	CU_ASSERT(rc == 0);
9523 	poll_threads();
9524 	CU_ASSERT(g_bserrno == -EINVAL);
9525 	ext_dev->destroy(ext_dev);
9526 
9527 	/* Initialize ext_dev for the successuful shallow copy */
9528 	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN);
9529 	bdev_ch = ext_dev->create_channel(ext_dev);
9530 	SPDK_CU_ASSERT_FATAL(bdev_ch != NULL);
9531 	ext_args.cb_fn = bs_dev_io_complete_cb;
9532 	for (offset = 0; offset < 4 * io_units_per_cluster; offset++) {
9533 		memset(buf2, 0xff, DEV_BUFFER_BLOCKLEN);
9534 		ext_dev->write(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
9535 		poll_threads();
9536 		CU_ASSERT(g_bserrno == 0);
9537 	}
9538 
9539 	/* Correct shallow copy of blob over bdev */
9540 	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
9541 				       blob_shallow_copy_status_cb, NULL,
9542 				       blob_op_complete, NULL);
9543 	CU_ASSERT(rc == 0);
9544 	poll_thread_times(0, 1);
9545 	CU_ASSERT(g_copied_clusters_count == 1);
9546 	poll_thread_times(0, 2);
9547 	CU_ASSERT(g_bserrno == 0);
9548 	CU_ASSERT(g_copied_clusters_count == 2);
9549 
9550 	/* Read from bdev */
9551 	/* Only cluster 1 and 3 must be filled */
9552 	/* Clusters 2 and 4 should not have been touched */
9553 	for (offset = 0; offset < io_units_per_cluster; offset++) {
9554 		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
9555 		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
9556 		poll_threads();
9557 		CU_ASSERT(g_bserrno == 0);
9558 		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
9559 	}
9560 	for (offset = io_units_per_cluster; offset < 2 * io_units_per_cluster; offset++) {
9561 		memset(buf1, 0xff, DEV_BUFFER_BLOCKLEN);
9562 		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
9563 		poll_threads();
9564 		CU_ASSERT(g_bserrno == 0);
9565 		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
9566 	}
9567 	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
9568 		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
9569 		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
9570 		poll_threads();
9571 		CU_ASSERT(g_bserrno == 0);
9572 		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
9573 	}
9574 	for (offset = 3 * io_units_per_cluster; offset < 4 * io_units_per_cluster; offset++) {
9575 		memset(buf1, 0xff, DEV_BUFFER_BLOCKLEN);
9576 		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
9577 		poll_threads();
9578 		CU_ASSERT(g_bserrno == 0);
9579 		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
9580 	}
9581 
9582 	/* Clean up */
9583 	ext_dev->destroy_channel(ext_dev, bdev_ch);
9584 	ext_dev->destroy(ext_dev);
9585 	spdk_bs_free_io_channel(blob_ch);
9586 	ut_blob_close_and_delete(bs, blob);
9587 	poll_threads();
9588 }
9589 
9590 static void
9591 blob_set_parent(void)
9592 {
9593 	struct spdk_blob_store *bs = g_bs;
9594 	struct spdk_blob_opts opts;
9595 	struct ut_esnap_opts esnap_opts;
9596 	struct spdk_blob *blob1, *blob2, *blob3, *blob4, *blob5;
9597 	spdk_blob_id blobid1, blobid2, blobid3, blobid4, blobid5,
9598 		     snapshotid1, snapshotid2, snapshotid3;
9599 	uint32_t cluster_sz, block_sz;
9600 	const uint32_t esnap_num_clusters = 4;
9601 	uint64_t esnap_num_blocks;
9602 	spdk_blob_id ids[2];
9603 	size_t clone_count = 2;
9604 
9605 	cluster_sz = spdk_bs_get_cluster_size(bs);
9606 	block_sz = spdk_bs_get_io_unit_size(bs);
9607 	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
9608 
9609 	/* Create a normal blob and make a couple of snapshots */
9610 	ut_spdk_blob_opts_init(&opts);
9611 	blob1 = ut_blob_create_and_open(bs, &opts);
9612 	SPDK_CU_ASSERT_FATAL(blob1 != NULL);
9613 	blobid1 = spdk_blob_get_id(blob1);
9614 	spdk_bs_create_snapshot(bs, blobid1, NULL, blob_op_with_id_complete, NULL);
9615 	poll_threads();
9616 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
9617 	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
9618 	snapshotid1 = g_blobid;
9619 	spdk_bs_create_snapshot(bs, blobid1, NULL, blob_op_with_id_complete, NULL);
9620 	poll_threads();
9621 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
9622 	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
9623 	snapshotid2 = g_blobid;
9624 
9625 	/* Call set_parent with an invalid snapshotid */
9626 	spdk_bs_blob_set_parent(bs, blobid1, SPDK_BLOBID_INVALID, blob_op_complete, NULL);
9627 	poll_threads();
9628 	CU_ASSERT(g_bserrno == -EINVAL);
9629 
9630 	/* Call set_parent with blobid and snapshotid the same */
9631 	spdk_bs_blob_set_parent(bs, blobid1, blobid1, blob_op_complete, NULL);
9632 	poll_threads();
9633 	CU_ASSERT(g_bserrno == -EINVAL);
9634 
9635 	/* Call set_parent with a blob and its parent snapshot */
9636 	spdk_bs_blob_set_parent(bs, blobid1, snapshotid2, blob_op_complete, NULL);
9637 	poll_threads();
9638 	CU_ASSERT(g_bserrno == -EEXIST);
9639 
9640 	/* Create an esnap clone blob */
9641 	ut_spdk_blob_opts_init(&opts);
9642 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
9643 	opts.esnap_id = &esnap_opts;
9644 	opts.esnap_id_len = sizeof(esnap_opts);
9645 	opts.num_clusters = esnap_num_clusters;
9646 	blob2 = ut_blob_create_and_open(bs, &opts);
9647 	SPDK_CU_ASSERT_FATAL(blob2 != NULL);
9648 	blobid2 = spdk_blob_get_id(blob2);
9649 	CU_ASSERT(spdk_blob_is_esnap_clone(blob2));
9650 
9651 	/* Call set_parent with a non snapshot parent */
9652 	spdk_bs_blob_set_parent(bs, blobid2, blobid1, blob_op_complete, NULL);
9653 	poll_threads();
9654 	CU_ASSERT(g_bserrno == -EINVAL);
9655 
9656 	/* Call set_parent with blob and snapshot of different size */
9657 	spdk_bs_blob_set_parent(bs, blobid2, snapshotid1, blob_op_complete, NULL);
9658 	poll_threads();
9659 	CU_ASSERT(g_bserrno == -EINVAL);
9660 
9661 	/* Call set_parent correctly with a snapshot's clone blob */
9662 	spdk_bs_blob_set_parent(bs, blobid1, snapshotid1, blob_op_complete, NULL);
9663 	poll_threads();
9664 	CU_ASSERT(g_bserrno == 0);
9665 
9666 	/* Check relations */
9667 	CU_ASSERT(spdk_blob_is_clone(blob1));
9668 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid1) == snapshotid1);
9669 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid1, ids, &clone_count) == 0);
9670 	CU_ASSERT(clone_count == 2);
9671 	CU_ASSERT(ids[1] == blobid1);
9672 
9673 	/* Create another normal blob with size equal to esnap size and make a snapshot */
9674 	ut_spdk_blob_opts_init(&opts);
9675 	opts.num_clusters = esnap_num_clusters;
9676 	opts.thin_provision = true;
9677 	blob3 = ut_blob_create_and_open(bs, &opts);
9678 	SPDK_CU_ASSERT_FATAL(blob3 != NULL);
9679 	blobid3 = spdk_blob_get_id(blob3);
9680 	spdk_bs_create_snapshot(bs, blobid3, NULL, blob_op_with_id_complete, NULL);
9681 	poll_threads();
9682 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
9683 	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
9684 	snapshotid3 = g_blobid;
9685 
9686 	/* Call set_parent correctly with an esnap's clone blob */
9687 	spdk_bs_blob_set_parent(bs, blobid2, snapshotid3, blob_op_complete, NULL);
9688 	poll_threads();
9689 	CU_ASSERT(g_bserrno == 0);
9690 
9691 	/* Check relations */
9692 	CU_ASSERT(!spdk_blob_is_esnap_clone(blob2));
9693 	CU_ASSERT(spdk_blob_is_clone(blob2));
9694 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid2) == snapshotid3);
9695 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid3, ids, &clone_count) == 0);
9696 	CU_ASSERT(clone_count == 2);
9697 	CU_ASSERT(ids[1] == blobid2);
9698 
9699 	/* Create a not thin-provisioned blob that is not a clone */
9700 	ut_spdk_blob_opts_init(&opts);
9701 	opts.thin_provision = false;
9702 	blob4 = ut_blob_create_and_open(bs, &opts);
9703 	SPDK_CU_ASSERT_FATAL(blob4 != NULL);
9704 	blobid4 = spdk_blob_get_id(blob4);
9705 
9706 	/* Call set_parent with a blob that isn't a clone and that isn't thin-provisioned */
9707 	spdk_bs_blob_set_parent(bs, blobid4, snapshotid2, blob_op_complete, NULL);
9708 	poll_threads();
9709 	CU_ASSERT(g_bserrno == -EINVAL);
9710 
9711 	/* Create a thin-provisioned blob that is not a clone */
9712 	ut_spdk_blob_opts_init(&opts);
9713 	opts.thin_provision = true;
9714 	blob5 = ut_blob_create_and_open(bs, &opts);
9715 	SPDK_CU_ASSERT_FATAL(blob5 != NULL);
9716 	blobid5 = spdk_blob_get_id(blob5);
9717 
9718 	/* Call set_parent correctly with a blob that isn't a clone */
9719 	spdk_bs_blob_set_parent(bs, blobid5, snapshotid2, blob_op_complete, NULL);
9720 	poll_threads();
9721 	CU_ASSERT(g_bserrno == 0);
9722 
9723 	/* Check relations */
9724 	CU_ASSERT(spdk_blob_is_clone(blob5));
9725 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid5) == snapshotid2);
9726 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &clone_count) == 0);
9727 	CU_ASSERT(clone_count == 1);
9728 	CU_ASSERT(ids[0] == blobid5);
9729 
9730 	/* Clean up */
9731 	ut_blob_close_and_delete(bs, blob5);
9732 	ut_blob_close_and_delete(bs, blob4);
9733 	ut_blob_close_and_delete(bs, blob3);
9734 	ut_blob_close_and_delete(bs, blob2);
9735 	ut_blob_close_and_delete(bs, blob1);
9736 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
9737 	poll_threads();
9738 	CU_ASSERT(g_bserrno == 0);
9739 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
9740 	poll_threads();
9741 	CU_ASSERT(g_bserrno == 0);
9742 	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
9743 	poll_threads();
9744 	CU_ASSERT(g_bserrno == 0);
9745 }
9746 
static void
blob_set_external_parent(void)
{
	/*
	 * Exercise spdk_bs_blob_set_external_parent(): the rejection paths
	 * (blob used as its own esnap id, size-mismatched esnap, re-attaching
	 * the blob's existing esnap, a thick blob that is not a clone) and the
	 * success paths (a snapshot's clone and a plain thin-provisioned blob).
	 */
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct ut_esnap_opts esnap_opts, esnap_opts2;
	struct spdk_blob *blob1, *blob2, *blob3, *blob4;
	spdk_blob_id blobid1, blobid2, blobid3, blobid4, snapshotid;
	uint32_t cluster_sz, block_sz;
	const uint32_t esnap_num_clusters = 4;
	uint64_t esnap_num_blocks;
	struct spdk_bs_dev *esnap_dev1, *esnap_dev2, *esnap_dev3;
	const void *esnap_id;
	size_t esnap_id_len;
	int rc;

	/* Size the esnap devices to exactly esnap_num_clusters of this blobstore. */
	cluster_sz = spdk_bs_get_cluster_size(bs);
	block_sz = spdk_bs_get_io_unit_size(bs);
	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
	esnap_dev1 = init_dev();
	esnap_dev2 = init_dev();
	esnap_dev3 = init_dev();

	/* Create an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob1 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob1 != NULL);
	blobid1 = spdk_blob_get_id(blob1);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob1));

	/* Call set_external_parent with blobid and esnapid the same.
	 * NOTE(review): no poll_threads() before the next two asserts --
	 * presumably these validation failures complete the callback
	 * synchronously, before any async step; confirm against
	 * spdk_bs_blob_set_external_parent(). */
	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, &blobid1, sizeof(blobid1),
					 blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_external_parent with esnap of incompatible size */
	esnap_dev1->blockcnt = esnap_num_blocks - 1;
	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, opts.esnap_id, opts.esnap_id_len,
					 blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_external_parent with a blob and its parent esnap: the id is
	 * already attached, so the call must fail with -EEXIST. */
	esnap_dev1->blocklen = block_sz;
	esnap_dev1->blockcnt = esnap_num_blocks;
	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, opts.esnap_id, opts.esnap_id_len,
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EEXIST);

	/* Create a blob that is a clone of a snapshot */
	ut_spdk_blob_opts_init(&opts);
	blob2 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob2 != NULL);
	blobid2 = spdk_blob_get_id(blob2);
	spdk_bs_create_snapshot(bs, blobid2, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	/* Call set_external_parent correctly with a snapshot's clone blob */
	esnap_dev2->blocklen = block_sz;
	esnap_dev2->blockcnt = esnap_num_blocks;
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts2);
	spdk_bs_blob_set_external_parent(bs, blobid2, esnap_dev2, &esnap_opts2, sizeof(esnap_opts2),
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations: blob2 switched from snapshot clone to esnap clone and
	 * reports the new esnap id. */
	rc = spdk_blob_get_esnap_id(blob2, &esnap_id, &esnap_id_len);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob2));
	CU_ASSERT(!spdk_blob_is_clone(blob2));
	CU_ASSERT(rc == 0 && esnap_id_len == sizeof(esnap_opts2) &&
		  memcmp(esnap_id, &esnap_opts2, esnap_id_len) == 0);
	CU_ASSERT(blob2->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT);

	/* Create a not thin-provisioned blob that is not a clone */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	blob3 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob3 != NULL);
	blobid3 = spdk_blob_get_id(blob3);

	/* Call set_external_parent with a blob that isn't a clone and that isn't thin-provisioned */
	spdk_bs_blob_set_external_parent(bs, blobid3, esnap_dev1, &esnap_opts, sizeof(esnap_opts),
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Create a thin-provisioned blob that is not a clone */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	blob4 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob4 != NULL);
	blobid4 = spdk_blob_get_id(blob4);

	/* Call set_external_parent correctly with a blob that isn't a clone */
	esnap_dev3->blocklen = block_sz;
	esnap_dev3->blockcnt = esnap_num_blocks;
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	spdk_bs_blob_set_external_parent(bs, blobid4, esnap_dev3, &esnap_opts, sizeof(esnap_opts),
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations */
	rc = spdk_blob_get_esnap_id(blob4, &esnap_id, &esnap_id_len);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob4));
	CU_ASSERT(!spdk_blob_is_clone(blob4));
	CU_ASSERT(rc == 0 && esnap_id_len == sizeof(esnap_opts) &&
		  memcmp(esnap_id, &esnap_opts, esnap_id_len) == 0);
	CU_ASSERT(blob4->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT);

	/* Clean up. Only esnap_dev1 is destroyed here -- presumably esnap_dev2
	 * and esnap_dev3 are owned (and torn down) by the blobstore once
	 * attached as external parents; verify against the blobstore's esnap
	 * ownership rules. */
	ut_blob_close_and_delete(bs, blob4);
	ut_blob_close_and_delete(bs, blob3);
	ut_blob_close_and_delete(bs, blob2);
	ut_blob_close_and_delete(bs, blob1);
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	dev_destroy(esnap_dev1);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
9874 
9875 static void
9876 suite_bs_setup(void)
9877 {
9878 	struct spdk_bs_dev *dev;
9879 
9880 	dev = init_dev();
9881 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9882 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
9883 	poll_threads();
9884 	CU_ASSERT(g_bserrno == 0);
9885 	CU_ASSERT(g_bs != NULL);
9886 }
9887 
9888 static void
9889 suite_esnap_bs_setup(void)
9890 {
9891 	struct spdk_bs_dev	*dev;
9892 	struct spdk_bs_opts	bs_opts;
9893 
9894 	dev = init_dev();
9895 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9896 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
9897 	bs_opts.cluster_sz = 16 * 1024;
9898 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
9899 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
9900 	poll_threads();
9901 	CU_ASSERT(g_bserrno == 0);
9902 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
9903 }
9904 
9905 static void
9906 suite_bs_cleanup(void)
9907 {
9908 	if (g_bs != NULL) {
9909 		spdk_bs_unload(g_bs, bs_op_complete, NULL);
9910 		poll_threads();
9911 		CU_ASSERT(g_bserrno == 0);
9912 		g_bs = NULL;
9913 	}
9914 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9915 }
9916 
9917 static struct spdk_blob *
9918 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
9919 {
9920 	struct spdk_blob *blob;
9921 	struct spdk_blob_opts create_blob_opts;
9922 	spdk_blob_id blobid;
9923 
9924 	if (blob_opts == NULL) {
9925 		ut_spdk_blob_opts_init(&create_blob_opts);
9926 		blob_opts = &create_blob_opts;
9927 	}
9928 
9929 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
9930 	poll_threads();
9931 	CU_ASSERT(g_bserrno == 0);
9932 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9933 	blobid = g_blobid;
9934 	g_blobid = -1;
9935 
9936 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
9937 	poll_threads();
9938 	CU_ASSERT(g_bserrno == 0);
9939 	CU_ASSERT(g_blob != NULL);
9940 	blob = g_blob;
9941 
9942 	g_blob = NULL;
9943 	g_bserrno = -1;
9944 
9945 	return blob;
9946 }
9947 
9948 static void
9949 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
9950 {
9951 	spdk_blob_id blobid = spdk_blob_get_id(blob);
9952 
9953 	spdk_blob_close(blob, blob_op_complete, NULL);
9954 	poll_threads();
9955 	CU_ASSERT(g_bserrno == 0);
9956 	g_blob = NULL;
9957 
9958 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
9959 	poll_threads();
9960 	CU_ASSERT(g_bserrno == 0);
9961 	g_bserrno = -1;
9962 }
9963 
9964 static void
9965 suite_blob_setup(void)
9966 {
9967 	suite_bs_setup();
9968 	CU_ASSERT(g_bs != NULL);
9969 
9970 	g_blob = ut_blob_create_and_open(g_bs, NULL);
9971 	CU_ASSERT(g_blob != NULL);
9972 }
9973 
9974 static void
9975 suite_blob_cleanup(void)
9976 {
9977 	ut_blob_close_and_delete(g_bs, g_blob);
9978 	CU_ASSERT(g_blob == NULL);
9979 
9980 	suite_bs_cleanup();
9981 	CU_ASSERT(g_bs == NULL);
9982 }
9983 
9984 static int
9985 ut_setup_config_nocopy_noextent(void)
9986 {
9987 	g_dev_copy_enabled = false;
9988 	g_use_extent_table = false;
9989 
9990 	return 0;
9991 }
9992 
9993 static int
9994 ut_setup_config_nocopy_extent(void)
9995 {
9996 	g_dev_copy_enabled = false;
9997 	g_use_extent_table = true;
9998 
9999 	return 0;
10000 }
10001 
10002 static int
10003 ut_setup_config_copy_noextent(void)
10004 {
10005 	g_dev_copy_enabled = true;
10006 	g_use_extent_table = false;
10007 
10008 	return 0;
10009 }
10010 
10011 static int
10012 ut_setup_config_copy_extent(void)
10013 {
10014 	g_dev_copy_enabled = true;
10015 	g_use_extent_table = true;
10016 
10017 	return 0;
10018 }
10019 
/*
 * One entry per blobstore configuration under test: the suite-name suffix and
 * the CUnit suite-init callback that selects that configuration.
 */
struct ut_config {
	const char *suffix;	/* appended to each suite name, e.g. "copy_extent" */
	CU_InitializeFunc setup_cb;	/* one of the ut_setup_config_* functions */
};
10024 
10025 int
10026 main(int argc, char **argv)
10027 {
10028 	CU_pSuite		suite, suite_bs, suite_blob, suite_esnap_bs;
10029 	unsigned int		i, num_failures;
10030 	char			suite_name[4096];
10031 	struct ut_config	*config;
10032 	struct ut_config	configs[] = {
10033 		{"nocopy_noextent", ut_setup_config_nocopy_noextent},
10034 		{"nocopy_extent", ut_setup_config_nocopy_extent},
10035 		{"copy_noextent", ut_setup_config_copy_noextent},
10036 		{"copy_extent", ut_setup_config_copy_extent},
10037 	};
10038 
10039 	CU_initialize_registry();
10040 
10041 	for (i = 0; i < SPDK_COUNTOF(configs); ++i) {
10042 		config = &configs[i];
10043 
10044 		snprintf(suite_name, sizeof(suite_name), "blob_%s", config->suffix);
10045 		suite = CU_add_suite(suite_name, config->setup_cb, NULL);
10046 
10047 		snprintf(suite_name, sizeof(suite_name), "blob_bs_%s", config->suffix);
10048 		suite_bs = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
10049 				suite_bs_setup, suite_bs_cleanup);
10050 
10051 		snprintf(suite_name, sizeof(suite_name), "blob_blob_%s", config->suffix);
10052 		suite_blob = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
10053 				suite_blob_setup, suite_blob_cleanup);
10054 
10055 		snprintf(suite_name, sizeof(suite_name), "blob_esnap_bs_%s", config->suffix);
10056 		suite_esnap_bs = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
10057 				 suite_esnap_bs_setup,
10058 				 suite_bs_cleanup);
10059 
10060 		CU_ADD_TEST(suite, blob_init);
10061 		CU_ADD_TEST(suite_bs, blob_open);
10062 		CU_ADD_TEST(suite_bs, blob_create);
10063 		CU_ADD_TEST(suite_bs, blob_create_loop);
10064 		CU_ADD_TEST(suite_bs, blob_create_fail);
10065 		CU_ADD_TEST(suite_bs, blob_create_internal);
10066 		CU_ADD_TEST(suite_bs, blob_create_zero_extent);
10067 		CU_ADD_TEST(suite, blob_thin_provision);
10068 		CU_ADD_TEST(suite_bs, blob_snapshot);
10069 		CU_ADD_TEST(suite_bs, blob_clone);
10070 		CU_ADD_TEST(suite_bs, blob_inflate);
10071 		CU_ADD_TEST(suite_bs, blob_delete);
10072 		CU_ADD_TEST(suite_bs, blob_resize_test);
10073 		CU_ADD_TEST(suite_bs, blob_resize_thin_test);
10074 		CU_ADD_TEST(suite, blob_read_only);
10075 		CU_ADD_TEST(suite_bs, channel_ops);
10076 		CU_ADD_TEST(suite_bs, blob_super);
10077 		CU_ADD_TEST(suite_blob, blob_write);
10078 		CU_ADD_TEST(suite_blob, blob_read);
10079 		CU_ADD_TEST(suite_blob, blob_rw_verify);
10080 		CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
10081 		CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
10082 		CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
10083 		CU_ADD_TEST(suite_bs, blob_unmap);
10084 		CU_ADD_TEST(suite_bs, blob_iter);
10085 		CU_ADD_TEST(suite_blob, blob_xattr);
10086 		CU_ADD_TEST(suite_bs, blob_parse_md);
10087 		CU_ADD_TEST(suite, bs_load);
10088 		CU_ADD_TEST(suite_bs, bs_load_pending_removal);
10089 		CU_ADD_TEST(suite, bs_load_custom_cluster_size);
10090 		CU_ADD_TEST(suite, bs_load_after_failed_grow);
10091 		CU_ADD_TEST(suite_bs, bs_unload);
10092 		CU_ADD_TEST(suite, bs_cluster_sz);
10093 		CU_ADD_TEST(suite_bs, bs_usable_clusters);
10094 		CU_ADD_TEST(suite, bs_resize_md);
10095 		CU_ADD_TEST(suite, bs_destroy);
10096 		CU_ADD_TEST(suite, bs_type);
10097 		CU_ADD_TEST(suite, bs_super_block);
10098 		CU_ADD_TEST(suite, bs_test_recover_cluster_count);
10099 		CU_ADD_TEST(suite, bs_grow_live);
10100 		CU_ADD_TEST(suite, bs_grow_live_no_space);
10101 		CU_ADD_TEST(suite, bs_test_grow);
10102 		CU_ADD_TEST(suite, blob_serialize_test);
10103 		CU_ADD_TEST(suite_bs, blob_crc);
10104 		CU_ADD_TEST(suite, super_block_crc);
10105 		CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
10106 		CU_ADD_TEST(suite_bs, blob_flags);
10107 		CU_ADD_TEST(suite_bs, bs_version);
10108 		CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
10109 		CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
10110 		CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
10111 		CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
10112 		CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
10113 		CU_ADD_TEST(suite, blob_thin_prov_unmap_cluster);
10114 		CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
10115 		CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
10116 		CU_ADD_TEST(suite, bs_load_iter_test);
10117 		CU_ADD_TEST(suite_bs, blob_snapshot_rw);
10118 		CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
10119 		CU_ADD_TEST(suite, blob_relations);
10120 		CU_ADD_TEST(suite, blob_relations2);
10121 		CU_ADD_TEST(suite, blob_relations3);
10122 		CU_ADD_TEST(suite, blobstore_clean_power_failure);
10123 		CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
10124 		CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
10125 		CU_ADD_TEST(suite_bs, blob_inflate_rw);
10126 		CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
10127 		CU_ADD_TEST(suite_bs, blob_operation_split_rw);
10128 		CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
10129 		CU_ADD_TEST(suite, blob_io_unit);
10130 		CU_ADD_TEST(suite, blob_io_unit_compatibility);
10131 		CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
10132 		CU_ADD_TEST(suite_bs, blob_persist_test);
10133 		CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
10134 		CU_ADD_TEST(suite_bs, blob_seek_io_unit);
10135 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_create);
10136 		CU_ADD_TEST(suite_bs, blob_nested_freezes);
10137 		CU_ADD_TEST(suite, blob_ext_md_pages);
10138 		CU_ADD_TEST(suite, blob_esnap_io_4096_4096);
10139 		CU_ADD_TEST(suite, blob_esnap_io_512_512);
10140 		CU_ADD_TEST(suite, blob_esnap_io_4096_512);
10141 		CU_ADD_TEST(suite, blob_esnap_io_512_4096);
10142 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_thread_add_remove);
10143 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_snapshot);
10144 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_inflate);
10145 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_decouple);
10146 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_reload);
10147 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_hotplug);
10148 		CU_ADD_TEST(suite_blob, blob_is_degraded);
10149 		CU_ADD_TEST(suite_bs, blob_clone_resize);
10150 		CU_ADD_TEST(suite, blob_esnap_clone_resize);
10151 		CU_ADD_TEST(suite_bs, blob_shallow_copy);
10152 		CU_ADD_TEST(suite_esnap_bs, blob_set_parent);
10153 		CU_ADD_TEST(suite_esnap_bs, blob_set_external_parent);
10154 	}
10155 
10156 	allocate_threads(2);
10157 	set_thread(0);
10158 
10159 	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
10160 
10161 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
10162 
10163 	free(g_dev_buffer);
10164 
10165 	free_threads();
10166 
10167 	return num_failures;
10168 }
10169