1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_cunit.h"
10 #include "spdk/blob.h"
11 #include "spdk/string.h"
12 
13 #include "common/lib/ut_multithread.c"
14 #include "../bs_dev_common.c"
15 #include "thread/thread.c"
16 #include "blob/blobstore.c"
17 #include "blob/request.c"
18 #include "blob/zeroes.c"
19 #include "blob/blob_bs_dev.c"
20 #include "esnap_dev.c"
21 
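/*
 * Globals used by the asynchronous completion callbacks below to hand results
 * (error codes, blob handles, blob IDs) back to the test code after
 * poll_threads() has run the pending operations.
 */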
22 struct spdk_blob_store *g_bs;
23 spdk_blob_id g_blobid;
24 struct spdk_blob *g_blob, *g_blob2;
25 int g_bserrno, g_bserrno2;
26 struct spdk_xattr_names *g_names;
27 int g_done;
28 char *g_xattr_names[] = {"first", "second", "third"};
29 char *g_xattr_values[] = {"one", "two", "three"};
30 uint64_t g_ctx = 1729;
31 bool g_use_extent_table = false;
32 
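/*
 * Local copy of the on-disk version 1 super block layout.  The static assert
 * below pins the structure to exactly one 4 KiB metadata page (0x1000 bytes).
 */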
33 struct spdk_bs_super_block_ver1 {
34 	uint8_t		signature[8];
35 	uint32_t        version;
36 	uint32_t        length;
37 	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
38 	spdk_blob_id	super_blob;
39 
40 	uint32_t	cluster_size; /* In bytes */
41 
42 	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
43 	uint32_t	used_page_mask_len; /* Count, in pages */
44 
45 	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
46 	uint32_t	used_cluster_mask_len; /* Count, in pages */
47 
48 	uint32_t	md_start; /* Offset from beginning of disk, in pages */
49 	uint32_t	md_len; /* Count, in pages */
50 
51 	uint8_t		reserved[4036];
52 	uint32_t	crc;
53 } __attribute__((packed));
54 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
55 
56 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
57 		struct spdk_blob_opts *blob_opts);
58 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
59 static void suite_blob_setup(void);
60 static void suite_blob_cleanup(void);
61 
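/*
 * Stub out spdk_memory_domain_memzero(); for these unit tests a no-op that
 * reports success (0) is sufficient.
 */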
62 DEFINE_STUB(spdk_memory_domain_memzero, int, (struct spdk_memory_domain *src_domain,
63 		void *src_domain_ctx, struct iovec *iov, uint32_t iovcnt, void (*cpl_cb)(void *, int),
64 		void *cpl_cb_arg), 0);
65 
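/*
 * Helpers that check whether a blob currently looks like an esnap clone: the
 * internal BLOB_EXTERNAL_SNAPSHOT_ID xattr, the SPDK_BLOB_EXTERNAL_SNAPSHOT
 * invalid flag and the parent_id must all agree.  They are used through the
 * UT_ASSERT_IS_ESNAP_CLONE / UT_ASSERT_IS_NOT_ESNAP_CLONE macros below.
 */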
66 static bool
67 is_esnap_clone(struct spdk_blob *_blob, const void *id, size_t id_len)
68 {
69 	const void *val = NULL;
70 	size_t len = 0;
71 	bool c0, c1, c2, c3;
72 
73 	CU_ASSERT(blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
74 				       true) == 0);
75 	CU_ASSERT((c0 = (len == id_len)));
76 	CU_ASSERT((c1 = (val != NULL && memcmp(val, id, len) == 0)));
77 	CU_ASSERT((c2 = !!(_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT)));
78 	CU_ASSERT((c3 = (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT)));
79 
80 	return c0 && c1 && c2 && c3;
81 }
82 
83 static bool
84 is_not_esnap_clone(struct spdk_blob *_blob)
85 {
86 	const void *val = NULL;
87 	size_t len = 0;
88 	bool c1, c2, c3, c4;
89 
90 	CU_ASSERT((c1 = (blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
91 					      true) == -ENOENT)));
92 	CU_ASSERT((c2 = (val == NULL)));
93 	CU_ASSERT((c3 = ((_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT) == 0)));
94 	CU_ASSERT((c4 = (_blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT)));
95 
96 	return c1 && c2 && c3 && c4;
97 }
98 
99 #define UT_ASSERT_IS_ESNAP_CLONE(_blob, _id, _len) CU_ASSERT(is_esnap_clone(_blob, _id, _len))
100 #define UT_ASSERT_IS_NOT_ESNAP_CLONE(_blob) CU_ASSERT(is_not_esnap_clone(_blob))
101 
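/*
 * xattr get_value callbacks handed to the blobstore via
 * struct spdk_blob_xattr_opts.  The first resolves a test xattr name to the
 * matching entry in g_xattr_values; the second returns a NULL value of
 * length 0.
 */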
102 static void
103 _get_xattr_value(void *arg, const char *name,
104 		 const void **value, size_t *value_len)
105 {
106 	uint64_t i;
107 
108 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
109 	SPDK_CU_ASSERT_FATAL(value != NULL);
110 	CU_ASSERT(arg == &g_ctx);
111 
112 	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
113 		if (!strcmp(name, g_xattr_names[i])) {
114 			*value_len = strlen(g_xattr_values[i]);
115 			*value = g_xattr_values[i];
116 			break;
117 		}
118 	}
119 }
120 
121 static void
122 _get_xattr_value_null(void *arg, const char *name,
123 		      const void **value, size_t *value_len)
124 {
125 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
126 	SPDK_CU_ASSERT_FATAL(value != NULL);
127 	CU_ASSERT(arg == NULL);
128 
129 	*value_len = 0;
130 	*value = NULL;
131 }
132 
133 static int
134 _get_snapshots_count(struct spdk_blob_store *bs)
135 {
136 	struct spdk_blob_list *snapshot = NULL;
137 	int count = 0;
138 
139 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
140 		count += 1;
141 	}
142 
143 	return count;
144 }
145 
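/*
 * Wrapper around spdk_blob_opts_init() that also applies the suite-wide
 * g_use_extent_table setting, so the tests can be run both with and without
 * the extent table metadata format.
 */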
146 static void
147 ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
148 {
149 	spdk_blob_opts_init(opts, sizeof(*opts));
150 	opts->use_extent_table = g_use_extent_table;
151 }
152 
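/*
 * Completion callbacks that record their results in the globals above.  Tests
 * drive asynchronous blobstore calls with a pattern along these lines (a
 * sketch; the real calls appear throughout this file):
 *
 *     spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
 *     poll_threads();
 *     CU_ASSERT(g_bserrno == 0);
 */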
153 static void
154 bs_op_complete(void *cb_arg, int bserrno)
155 {
156 	g_bserrno = bserrno;
157 }
158 
159 static void
160 bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
161 			   int bserrno)
162 {
163 	g_bs = bs;
164 	g_bserrno = bserrno;
165 }
166 
167 static void
168 blob_op_complete(void *cb_arg, int bserrno)
169 {
170 	g_bserrno = bserrno;
171 }
172 
173 static void
174 blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
175 {
176 	g_blobid = blobid;
177 	g_bserrno = bserrno;
178 }
179 
180 static void
181 blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
182 {
183 	g_blob = blb;
184 	g_bserrno = bserrno;
185 }
186 
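/*
 * Variant used when two opens are issued back to back: the first completion
 * lands in g_blob/g_bserrno, the second in g_blob2/g_bserrno2.
 */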
187 static void
188 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
189 {
190 	if (g_blob == NULL) {
191 		g_blob = blob;
192 		g_bserrno = bserrno;
193 	} else {
194 		g_blob2 = blob;
195 		g_bserrno2 = bserrno;
196 	}
197 }
198 
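/*
 * Cleanly unload the blobstore and load it again from the same device, so
 * tests can verify that on-disk state survives a clean shutdown.
 */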
199 static void
200 ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
201 {
202 	struct spdk_bs_dev *dev;
203 
204 	/* Unload the blob store */
205 	spdk_bs_unload(*bs, bs_op_complete, NULL);
206 	poll_threads();
207 	CU_ASSERT(g_bserrno == 0);
208 
209 	dev = init_dev();
210 	/* Load an existing blob store */
211 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
212 	poll_threads();
213 	CU_ASSERT(g_bserrno == 0);
214 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
215 	*bs = g_bs;
216 
217 	g_bserrno = -1;
218 }
219 
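/*
 * Simulate a dirty shutdown: free the in-memory blobstore without the normal
 * unload sequence, then load it again so the dirty-shutdown recovery path is
 * exercised.
 */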
220 static void
221 ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
222 {
223 	struct spdk_bs_dev *dev;
224 
225 	/* Dirty shutdown */
226 	bs_free(*bs);
227 
228 	dev = init_dev();
229 	/* Load an existing blob store */
230 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
231 	poll_threads();
232 	CU_ASSERT(g_bserrno == 0);
233 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
234 	*bs = g_bs;
235 
236 	g_bserrno = -1;
237 }
238 
239 static void
240 blob_init(void)
241 {
242 	struct spdk_blob_store *bs;
243 	struct spdk_bs_dev *dev;
244 
245 	dev = init_dev();
246 
247 	/* should fail for an unsupported blocklen */
248 	dev->blocklen = 500;
249 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
250 	poll_threads();
251 	CU_ASSERT(g_bserrno == -EINVAL);
252 
253 	dev = init_dev();
254 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
255 	poll_threads();
256 	CU_ASSERT(g_bserrno == 0);
257 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
258 	bs = g_bs;
259 
260 	spdk_bs_unload(bs, bs_op_complete, NULL);
261 	poll_threads();
262 	CU_ASSERT(g_bserrno == 0);
263 	g_bs = NULL;
264 }
265 
266 static void
267 blob_super(void)
268 {
269 	struct spdk_blob_store *bs = g_bs;
270 	spdk_blob_id blobid;
271 	struct spdk_blob_opts blob_opts;
272 
273 	/* Get the super blob without having set one */
274 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
275 	poll_threads();
276 	CU_ASSERT(g_bserrno == -ENOENT);
277 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
278 
279 	/* Create a blob */
280 	ut_spdk_blob_opts_init(&blob_opts);
281 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
282 	poll_threads();
283 	CU_ASSERT(g_bserrno == 0);
284 	CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
285 	blobid = g_blobid;
286 
287 	/* Set the blob as the super blob */
288 	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
289 	poll_threads();
290 	CU_ASSERT(g_bserrno == 0);
291 
292 	/* Get the super blob */
293 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
294 	poll_threads();
295 	CU_ASSERT(g_bserrno == 0);
296 	CU_ASSERT(blobid == g_blobid);
297 }
298 
299 static void
300 blob_open(void)
301 {
302 	struct spdk_blob_store *bs = g_bs;
303 	struct spdk_blob *blob;
304 	struct spdk_blob_opts blob_opts;
305 	spdk_blob_id blobid, blobid2;
306 
307 	ut_spdk_blob_opts_init(&blob_opts);
308 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
309 	poll_threads();
310 	CU_ASSERT(g_bserrno == 0);
311 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
312 	blobid = g_blobid;
313 
314 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
315 	poll_threads();
316 	CU_ASSERT(g_bserrno == 0);
317 	CU_ASSERT(g_blob != NULL);
318 	blob = g_blob;
319 
320 	blobid2 = spdk_blob_get_id(blob);
321 	CU_ASSERT(blobid == blobid2);
322 
323 	/* Try to open the blob again.  It should succeed. */
324 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
325 	poll_threads();
326 	CU_ASSERT(g_bserrno == 0);
327 	CU_ASSERT(blob == g_blob);
328 
329 	spdk_blob_close(blob, blob_op_complete, NULL);
330 	poll_threads();
331 	CU_ASSERT(g_bserrno == 0);
332 
333 	/*
334 	 * Close the blob a second time, releasing the second reference.  This
335 	 *  should succeed.
336 	 */
337 	blob = g_blob;
338 	spdk_blob_close(blob, blob_op_complete, NULL);
339 	poll_threads();
340 	CU_ASSERT(g_bserrno == 0);
341 
342 	/*
343 	 * Try to open the blob again.  It should succeed.  This tests the case
344 	 *  where the blob is opened, closed, then re-opened again.
345 	 */
346 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
347 	poll_threads();
348 	CU_ASSERT(g_bserrno == 0);
349 	CU_ASSERT(g_blob != NULL);
350 	blob = g_blob;
351 	spdk_blob_close(blob, blob_op_complete, NULL);
352 	poll_threads();
353 	CU_ASSERT(g_bserrno == 0);
354 
355 	/* Try to open the blob twice in succession.  This should return the same
356 	 * blob object.
357 	 */
358 	g_blob = NULL;
359 	g_blob2 = NULL;
360 	g_bserrno = -1;
361 	g_bserrno2 = -1;
362 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
363 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
364 	poll_threads();
365 	CU_ASSERT(g_bserrno == 0);
366 	CU_ASSERT(g_bserrno2 == 0);
367 	CU_ASSERT(g_blob != NULL);
368 	CU_ASSERT(g_blob2 != NULL);
369 	CU_ASSERT(g_blob == g_blob2);
370 
371 	g_bserrno = -1;
372 	spdk_blob_close(g_blob, blob_op_complete, NULL);
373 	poll_threads();
374 	CU_ASSERT(g_bserrno == 0);
375 
376 	ut_blob_close_and_delete(bs, g_blob);
377 }
378 
379 static void
380 blob_create(void)
381 {
382 	struct spdk_blob_store *bs = g_bs;
383 	struct spdk_blob *blob;
384 	struct spdk_blob_opts opts;
385 	spdk_blob_id blobid;
386 
387 	/* Create blob with 10 clusters */
388 
389 	ut_spdk_blob_opts_init(&opts);
390 	opts.num_clusters = 10;
391 
392 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
393 	poll_threads();
394 	CU_ASSERT(g_bserrno == 0);
395 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
396 	blobid = g_blobid;
397 
398 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
399 	poll_threads();
400 	CU_ASSERT(g_bserrno == 0);
401 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
402 	blob = g_blob;
403 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
404 
405 	spdk_blob_close(blob, blob_op_complete, NULL);
406 	poll_threads();
407 	CU_ASSERT(g_bserrno == 0);
408 
409 	/* Create blob with 0 clusters */
410 
411 	ut_spdk_blob_opts_init(&opts);
412 	opts.num_clusters = 0;
413 
414 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
415 	poll_threads();
416 	CU_ASSERT(g_bserrno == 0);
417 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
418 	blobid = g_blobid;
419 
420 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
421 	poll_threads();
422 	CU_ASSERT(g_bserrno == 0);
423 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
424 	blob = g_blob;
425 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
426 
427 	spdk_blob_close(blob, blob_op_complete, NULL);
428 	poll_threads();
429 	CU_ASSERT(g_bserrno == 0);
430 
431 	/* Create blob with default options (opts == NULL) */
432 
433 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
434 	poll_threads();
435 	CU_ASSERT(g_bserrno == 0);
436 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
437 	blobid = g_blobid;
438 
439 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
440 	poll_threads();
441 	CU_ASSERT(g_bserrno == 0);
442 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
443 	blob = g_blob;
444 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
445 
446 	spdk_blob_close(blob, blob_op_complete, NULL);
447 	poll_threads();
448 	CU_ASSERT(g_bserrno == 0);
449 
450 	/* Try to create blob with size larger than blobstore */
451 
452 	ut_spdk_blob_opts_init(&opts);
453 	opts.num_clusters = bs->total_clusters + 1;
454 
455 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
456 	poll_threads();
457 	CU_ASSERT(g_bserrno == -ENOSPC);
458 }
459 
460 static void
461 blob_create_zero_extent(void)
462 {
463 	struct spdk_blob_store *bs = g_bs;
464 	struct spdk_blob *blob;
465 	spdk_blob_id blobid;
466 
467 	/* Create blob with default options (opts == NULL) */
468 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
469 	poll_threads();
470 	CU_ASSERT(g_bserrno == 0);
471 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
472 	blobid = g_blobid;
473 
474 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
475 	poll_threads();
476 	CU_ASSERT(g_bserrno == 0);
477 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
478 	blob = g_blob;
479 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
480 	CU_ASSERT(blob->extent_table_found == true);
481 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
482 	CU_ASSERT(blob->active.extent_pages == NULL);
483 
484 	spdk_blob_close(blob, blob_op_complete, NULL);
485 	poll_threads();
486 	CU_ASSERT(g_bserrno == 0);
487 
488 	/* Create blob with NULL internal options  */
489 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
490 	poll_threads();
491 	CU_ASSERT(g_bserrno == 0);
492 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
493 	blobid = g_blobid;
494 
495 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
496 	poll_threads();
497 	CU_ASSERT(g_bserrno == 0);
498 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
499 	blob = g_blob;
500 	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
501 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
502 	CU_ASSERT(blob->extent_table_found == true);
503 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
504 	CU_ASSERT(blob->active.extent_pages == NULL);
505 
506 	spdk_blob_close(blob, blob_op_complete, NULL);
507 	poll_threads();
508 	CU_ASSERT(g_bserrno == 0);
509 }
510 
511 /*
512  * Create and delete one blob in a loop over and over again.  This helps ensure
513  * that the internal bit masks tracking used clusters and md_pages are being
514  * tracked correctly.
515  */
516 static void
517 blob_create_loop(void)
518 {
519 	struct spdk_blob_store *bs = g_bs;
520 	struct spdk_blob_opts opts;
521 	uint32_t i, loop_count;
522 
523 	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
524 				  spdk_bit_pool_capacity(bs->used_clusters));
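	/*
	 * Run the create/delete cycle far more times than there are md pages or
	 * clusters; if the used_md_pages / used_clusters bookkeeping leaked on
	 * each iteration, the blobstore would eventually run out of resources
	 * and the asserts below would fail.
	 */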
525 
526 	for (i = 0; i < loop_count; i++) {
527 		ut_spdk_blob_opts_init(&opts);
528 		opts.num_clusters = 1;
529 		g_bserrno = -1;
530 		g_blobid = SPDK_BLOBID_INVALID;
531 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
532 		poll_threads();
533 		CU_ASSERT(g_bserrno == 0);
534 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
535 		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
536 		poll_threads();
537 		CU_ASSERT(g_bserrno == 0);
538 	}
539 }
540 
541 static void
542 blob_create_fail(void)
543 {
544 	struct spdk_blob_store *bs = g_bs;
545 	struct spdk_blob_opts opts;
546 	spdk_blob_id blobid;
547 	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
548 	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
549 
550 	/* NULL callback */
551 	ut_spdk_blob_opts_init(&opts);
552 	opts.xattrs.names = g_xattr_names;
553 	opts.xattrs.get_value = NULL;
554 	opts.xattrs.count = 1;
555 	opts.xattrs.ctx = &g_ctx;
556 
557 	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
558 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
559 	poll_threads();
560 	CU_ASSERT(g_bserrno == -EINVAL);
561 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
562 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
563 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
564 
565 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
566 	poll_threads();
567 	CU_ASSERT(g_bserrno == -ENOENT);
568 	SPDK_CU_ASSERT_FATAL(g_blob == NULL);
569 
570 	ut_bs_reload(&bs, NULL);
571 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
572 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
573 
574 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
575 	poll_threads();
576 	CU_ASSERT(g_blob == NULL);
577 	CU_ASSERT(g_bserrno == -ENOENT);
578 }
579 
580 static void
581 blob_create_internal(void)
582 {
583 	struct spdk_blob_store *bs = g_bs;
584 	struct spdk_blob *blob;
585 	struct spdk_blob_opts opts;
586 	struct spdk_blob_xattr_opts internal_xattrs;
587 	const void *value;
588 	size_t value_len;
589 	spdk_blob_id blobid;
590 	int rc;
591 
592 	/* Create blob with custom xattrs */
593 
594 	ut_spdk_blob_opts_init(&opts);
595 	blob_xattrs_init(&internal_xattrs);
596 	internal_xattrs.count = 3;
597 	internal_xattrs.names = g_xattr_names;
598 	internal_xattrs.get_value = _get_xattr_value;
599 	internal_xattrs.ctx = &g_ctx;
600 
601 	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
602 	poll_threads();
603 	CU_ASSERT(g_bserrno == 0);
604 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
605 	blobid = g_blobid;
606 
607 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
608 	poll_threads();
609 	CU_ASSERT(g_bserrno == 0);
610 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
611 	blob = g_blob;
612 
613 	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
614 	CU_ASSERT(rc == 0);
615 	SPDK_CU_ASSERT_FATAL(value != NULL);
616 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
617 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
618 
619 	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
620 	CU_ASSERT(rc == 0);
621 	SPDK_CU_ASSERT_FATAL(value != NULL);
622 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
623 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
624 
625 	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
626 	CU_ASSERT(rc == 0);
627 	SPDK_CU_ASSERT_FATAL(value != NULL);
628 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
629 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
630 
631 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
632 	CU_ASSERT(rc != 0);
633 
634 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
635 	CU_ASSERT(rc != 0);
636 
637 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
638 	CU_ASSERT(rc != 0);
639 
640 	spdk_blob_close(blob, blob_op_complete, NULL);
641 	poll_threads();
642 	CU_ASSERT(g_bserrno == 0);
643 
644 	/* Create blob with NULL internal options  */
645 
646 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
647 	poll_threads();
648 	CU_ASSERT(g_bserrno == 0);
649 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
650 	blobid = g_blobid;
651 
652 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
653 	poll_threads();
654 	CU_ASSERT(g_bserrno == 0);
655 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
656 	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
657 	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);
658 
659 	blob = g_blob;
660 
661 	spdk_blob_close(blob, blob_op_complete, NULL);
662 	poll_threads();
663 	CU_ASSERT(g_bserrno == 0);
664 }
665 
666 static void
667 blob_thin_provision(void)
668 {
669 	struct spdk_blob_store *bs;
670 	struct spdk_bs_dev *dev;
671 	struct spdk_blob *blob;
672 	struct spdk_blob_opts opts;
673 	struct spdk_bs_opts bs_opts;
674 	spdk_blob_id blobid;
675 
676 	dev = init_dev();
677 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
678 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
679 
680 	/* Initialize a new blob store */
681 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
682 	poll_threads();
683 	CU_ASSERT(g_bserrno == 0);
684 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
685 
686 	bs = g_bs;
687 
688 	/* Create blob with thin provisioning enabled */
689 
690 	ut_spdk_blob_opts_init(&opts);
691 	opts.thin_provision = true;
692 	opts.num_clusters = 10;
693 
694 	blob = ut_blob_create_and_open(bs, &opts);
695 	blobid = spdk_blob_get_id(blob);
696 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
697 	/* When a thin-provisioned blob is created with num_clusters set, no
698 	 * clusters are allocated.  If the extent table is used, the extent
699 	 * pages array is allocated up front; otherwise it stays empty. */
700 	if (blob->extent_table_found == true) {
701 		CU_ASSERT(blob->active.extent_pages_array_size > 0);
702 		CU_ASSERT(blob->active.extent_pages != NULL);
703 	} else {
704 		CU_ASSERT(blob->active.extent_pages_array_size == 0);
705 		CU_ASSERT(blob->active.extent_pages == NULL);
706 	}
707 
708 	spdk_blob_close(blob, blob_op_complete, NULL);
709 	CU_ASSERT(g_bserrno == 0);
710 
711 	/* Do not shut down cleanly.  This makes sure that when we load again
712 	 *  and try to recover a valid used_cluster map, that blobstore will
713 	 *  ignore clusters with index 0 since these are unallocated clusters.
714 	 */
715 	ut_bs_dirty_load(&bs, &bs_opts);
716 
717 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
718 	poll_threads();
719 	CU_ASSERT(g_bserrno == 0);
720 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
721 	blob = g_blob;
722 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
723 
724 	ut_blob_close_and_delete(bs, blob);
725 
726 	spdk_bs_unload(bs, bs_op_complete, NULL);
727 	poll_threads();
728 	CU_ASSERT(g_bserrno == 0);
729 	g_bs = NULL;
730 }
731 
732 static void
733 blob_snapshot(void)
734 {
735 	struct spdk_blob_store *bs = g_bs;
736 	struct spdk_blob *blob;
737 	struct spdk_blob *snapshot, *snapshot2;
738 	struct spdk_blob_bs_dev *blob_bs_dev;
739 	struct spdk_blob_opts opts;
740 	struct spdk_blob_xattr_opts xattrs;
741 	spdk_blob_id blobid;
742 	spdk_blob_id snapshotid;
743 	spdk_blob_id snapshotid2;
744 	const void *value;
745 	size_t value_len;
746 	int rc;
747 	spdk_blob_id ids[2];
748 	size_t count;
749 
750 	/* Create blob with 10 clusters */
751 	ut_spdk_blob_opts_init(&opts);
752 	opts.num_clusters = 10;
753 
754 	blob = ut_blob_create_and_open(bs, &opts);
755 	blobid = spdk_blob_get_id(blob);
756 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
757 
758 	/* Create snapshot from blob */
759 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
760 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
761 	poll_threads();
762 	CU_ASSERT(g_bserrno == 0);
763 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
764 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
765 	snapshotid = g_blobid;
766 
767 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
768 	poll_threads();
769 	CU_ASSERT(g_bserrno == 0);
770 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
771 	snapshot = g_blob;
772 	CU_ASSERT(snapshot->data_ro == true);
773 	CU_ASSERT(snapshot->md_ro == true);
774 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
775 
776 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
777 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
778 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
779 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
780 
781 	/* Try to create snapshot from clone with xattrs */
782 	xattrs.names = g_xattr_names;
783 	xattrs.get_value = _get_xattr_value;
784 	xattrs.count = 3;
785 	xattrs.ctx = &g_ctx;
786 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
787 	poll_threads();
788 	CU_ASSERT(g_bserrno == 0);
789 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
790 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
791 	snapshotid2 = g_blobid;
792 
793 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
794 	CU_ASSERT(g_bserrno == 0);
795 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
796 	snapshot2 = g_blob;
797 	CU_ASSERT(snapshot2->data_ro == true);
798 	CU_ASSERT(snapshot2->md_ro == true);
799 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
800 
801 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
802 	CU_ASSERT(snapshot->back_bs_dev == NULL);
803 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
804 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
805 
806 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
807 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
808 
809 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
810 	CU_ASSERT(blob_bs_dev->blob == snapshot);
811 
812 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
813 	CU_ASSERT(rc == 0);
814 	SPDK_CU_ASSERT_FATAL(value != NULL);
815 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
816 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
817 
818 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
819 	CU_ASSERT(rc == 0);
820 	SPDK_CU_ASSERT_FATAL(value != NULL);
821 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
822 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
823 
824 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
825 	CU_ASSERT(rc == 0);
826 	SPDK_CU_ASSERT_FATAL(value != NULL);
827 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
828 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
829 
830 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
831 	count = 2;
832 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
833 	CU_ASSERT(count == 1);
834 	CU_ASSERT(ids[0] == blobid);
835 
836 	count = 2;
837 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
838 	CU_ASSERT(count == 1);
839 	CU_ASSERT(ids[0] == snapshotid2);
840 
841 	/* Try to create snapshot from snapshot */
842 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
843 	poll_threads();
844 	CU_ASSERT(g_bserrno == -EINVAL);
845 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
846 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
847 
848 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
849 	ut_blob_close_and_delete(bs, blob);
850 	count = 2;
851 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
852 	CU_ASSERT(count == 0);
853 
854 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
855 	ut_blob_close_and_delete(bs, snapshot2);
856 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
857 	count = 2;
858 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
859 	CU_ASSERT(count == 0);
860 
861 	ut_blob_close_and_delete(bs, snapshot);
862 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
863 }
864 
865 static void
866 blob_snapshot_freeze_io(void)
867 {
868 	struct spdk_io_channel *channel;
869 	struct spdk_bs_channel *bs_channel;
870 	struct spdk_blob_store *bs = g_bs;
871 	struct spdk_blob *blob;
872 	struct spdk_blob_opts opts;
873 	spdk_blob_id blobid;
874 	uint32_t num_of_pages = 10;
875 	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
876 	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
877 	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];
878 
879 	memset(payload_write, 0xE5, sizeof(payload_write));
880 	memset(payload_read, 0x00, sizeof(payload_read));
881 	memset(payload_zero, 0x00, sizeof(payload_zero));
882 
883 	/* Test freeze I/O during snapshot */
884 	channel = spdk_bs_alloc_io_channel(bs);
885 	bs_channel = spdk_io_channel_get_ctx(channel);
886 
887 	/* Create blob with 10 clusters */
888 	ut_spdk_blob_opts_init(&opts);
889 	opts.num_clusters = 10;
890 	opts.thin_provision = false;
891 
892 	blob = ut_blob_create_and_open(bs, &opts);
893 	blobid = spdk_blob_get_id(blob);
894 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
895 
896 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
897 
898 	/* This is implementation specific.
899 	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
900 	 * Four async I/O operations happen before that. */
901 	poll_thread_times(0, 5);
902 
903 	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));
904 
905 	/* Blob I/O should be frozen here */
906 	CU_ASSERT(blob->frozen_refcnt == 1);
907 
908 	/* Write to the blob */
909 	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);
910 
911 	/* Verify that I/O is queued */
912 	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
913 	/* Verify that the payload is not yet written to disk; at this point the blobs have already been switched */
914 	CU_ASSERT(blob->active.clusters[0] == 0);
915 
916 	/* Finish all operations including spdk_bs_create_snapshot */
917 	poll_threads();
918 
919 	/* Verify snapshot */
920 	CU_ASSERT(g_bserrno == 0);
921 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
922 
923 	/* Verify that blob has unset frozen_io */
924 	CU_ASSERT(blob->frozen_refcnt == 0);
925 
926 	/* Verify that postponed I/O completed successfully by comparing payload */
927 	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
928 	poll_threads();
929 	CU_ASSERT(g_bserrno == 0);
930 	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);
931 
932 	spdk_bs_free_io_channel(channel);
933 	poll_threads();
934 
935 	ut_blob_close_and_delete(bs, blob);
936 }
937 
938 static void
939 blob_clone(void)
940 {
941 	struct spdk_blob_store *bs = g_bs;
942 	struct spdk_blob_opts opts;
943 	struct spdk_blob *blob, *snapshot, *clone;
944 	spdk_blob_id blobid, cloneid, snapshotid;
945 	struct spdk_blob_xattr_opts xattrs;
946 	const void *value;
947 	size_t value_len;
948 	int rc;
949 
950 	/* Create blob with 10 clusters */
951 
952 	ut_spdk_blob_opts_init(&opts);
953 	opts.num_clusters = 10;
954 
955 	blob = ut_blob_create_and_open(bs, &opts);
956 	blobid = spdk_blob_get_id(blob);
957 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
958 
959 	/* Create snapshot */
960 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
961 	poll_threads();
962 	CU_ASSERT(g_bserrno == 0);
963 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
964 	snapshotid = g_blobid;
965 
966 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
967 	poll_threads();
968 	CU_ASSERT(g_bserrno == 0);
969 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
970 	snapshot = g_blob;
971 	CU_ASSERT(snapshot->data_ro == true);
972 	CU_ASSERT(snapshot->md_ro == true);
973 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
974 
975 	spdk_blob_close(snapshot, blob_op_complete, NULL);
976 	poll_threads();
977 	CU_ASSERT(g_bserrno == 0);
978 
979 	/* Create clone from snapshot with xattrs */
980 	xattrs.names = g_xattr_names;
981 	xattrs.get_value = _get_xattr_value;
982 	xattrs.count = 3;
983 	xattrs.ctx = &g_ctx;
984 
985 	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
986 	poll_threads();
987 	CU_ASSERT(g_bserrno == 0);
988 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
989 	cloneid = g_blobid;
990 
991 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
992 	poll_threads();
993 	CU_ASSERT(g_bserrno == 0);
994 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
995 	clone = g_blob;
996 	CU_ASSERT(clone->data_ro == false);
997 	CU_ASSERT(clone->md_ro == false);
998 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
999 
1000 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
1001 	CU_ASSERT(rc == 0);
1002 	SPDK_CU_ASSERT_FATAL(value != NULL);
1003 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
1004 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
1005 
1006 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
1007 	CU_ASSERT(rc == 0);
1008 	SPDK_CU_ASSERT_FATAL(value != NULL);
1009 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
1010 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
1011 
1012 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
1013 	CU_ASSERT(rc == 0);
1014 	SPDK_CU_ASSERT_FATAL(value != NULL);
1015 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
1016 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
1017 
1018 
1019 	spdk_blob_close(clone, blob_op_complete, NULL);
1020 	poll_threads();
1021 	CU_ASSERT(g_bserrno == 0);
1022 
1023 	/* Try to create a clone from a blob that is not read-only */
1024 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1025 	poll_threads();
1026 	CU_ASSERT(g_bserrno == -EINVAL);
1027 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
1028 
1029 	/* Mark blob as read only */
1030 	spdk_blob_set_read_only(blob);
1031 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1032 	poll_threads();
1033 	CU_ASSERT(g_bserrno == 0);
1034 
1035 	/* Create clone from read only blob */
1036 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1037 	poll_threads();
1038 	CU_ASSERT(g_bserrno == 0);
1039 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1040 	cloneid = g_blobid;
1041 
1042 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
1043 	poll_threads();
1044 	CU_ASSERT(g_bserrno == 0);
1045 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1046 	clone = g_blob;
1047 	CU_ASSERT(clone->data_ro == false);
1048 	CU_ASSERT(clone->md_ro == false);
1049 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
1050 
1051 	ut_blob_close_and_delete(bs, clone);
1052 	ut_blob_close_and_delete(bs, blob);
1053 }
1054 
1055 static void
1056 _blob_inflate(bool decouple_parent)
1057 {
1058 	struct spdk_blob_store *bs = g_bs;
1059 	struct spdk_blob_opts opts;
1060 	struct spdk_blob *blob, *snapshot;
1061 	spdk_blob_id blobid, snapshotid;
1062 	struct spdk_io_channel *channel;
1063 	uint64_t free_clusters;
1064 
1065 	channel = spdk_bs_alloc_io_channel(bs);
1066 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1067 
1068 	/* Create blob with 10 clusters */
1069 
1070 	ut_spdk_blob_opts_init(&opts);
1071 	opts.num_clusters = 10;
1072 	opts.thin_provision = true;
1073 
1074 	blob = ut_blob_create_and_open(bs, &opts);
1075 	blobid = spdk_blob_get_id(blob);
1076 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1077 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1078 
1079 	/* 1) Blob with no parent */
1080 	if (decouple_parent) {
1081 		/* Decouple parent of blob with no parent (should fail) */
1082 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1083 		poll_threads();
1084 		CU_ASSERT(g_bserrno != 0);
1085 	} else {
1086 		/* Inflating a thin blob with no parent should make it thick */
1087 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1088 		poll_threads();
1089 		CU_ASSERT(g_bserrno == 0);
1090 		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
1091 	}
1092 
1093 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1094 	poll_threads();
1095 	CU_ASSERT(g_bserrno == 0);
1096 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1097 	snapshotid = g_blobid;
1098 
1099 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1100 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1101 
1102 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
1103 	poll_threads();
1104 	CU_ASSERT(g_bserrno == 0);
1105 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1106 	snapshot = g_blob;
1107 	CU_ASSERT(snapshot->data_ro == true);
1108 	CU_ASSERT(snapshot->md_ro == true);
1109 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
1110 
1111 	spdk_blob_close(snapshot, blob_op_complete, NULL);
1112 	poll_threads();
1113 	CU_ASSERT(g_bserrno == 0);
1114 
1115 	free_clusters = spdk_bs_free_cluster_count(bs);
1116 
1117 	/* 2) Blob with parent */
1118 	if (!decouple_parent) {
1119 		/* Do full blob inflation */
1120 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1121 		poll_threads();
1122 		CU_ASSERT(g_bserrno == 0);
1123 		/* all 10 clusters should be allocated */
1124 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
1125 	} else {
1126 		/* Decouple parent of blob */
1127 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1128 		poll_threads();
1129 		CU_ASSERT(g_bserrno == 0);
1130 		/* when only parent is removed, none of the clusters should be allocated */
1131 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
1132 	}
1133 
1134 	/* Now, it should be possible to delete snapshot */
1135 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
1136 	poll_threads();
1137 	CU_ASSERT(g_bserrno == 0);
1138 
1139 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1140 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);
1141 
1142 	spdk_bs_free_io_channel(channel);
1143 	poll_threads();
1144 
1145 	ut_blob_close_and_delete(bs, blob);
1146 }
1147 
1148 static void
1149 blob_inflate(void)
1150 {
1151 	_blob_inflate(false);
1152 	_blob_inflate(true);
1153 }
1154 
1155 static void
1156 blob_delete(void)
1157 {
1158 	struct spdk_blob_store *bs = g_bs;
1159 	struct spdk_blob_opts blob_opts;
1160 	spdk_blob_id blobid;
1161 
1162 	/* Create a blob and then delete it. */
1163 	ut_spdk_blob_opts_init(&blob_opts);
1164 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1165 	poll_threads();
1166 	CU_ASSERT(g_bserrno == 0);
1167 	CU_ASSERT(g_blobid > 0);
1168 	blobid = g_blobid;
1169 
1170 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
1171 	poll_threads();
1172 	CU_ASSERT(g_bserrno == 0);
1173 
1174 	/* Try to open the blob */
1175 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1176 	poll_threads();
1177 	CU_ASSERT(g_bserrno == -ENOENT);
1178 }
1179 
1180 static void
1181 blob_resize_test(void)
1182 {
1183 	struct spdk_blob_store *bs = g_bs;
1184 	struct spdk_blob *blob;
1185 	uint64_t free_clusters;
1186 
1187 	free_clusters = spdk_bs_free_cluster_count(bs);
1188 
1189 	blob = ut_blob_create_and_open(bs, NULL);
1190 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
1191 
1192 	/* Confirm that resize fails if blob is marked read-only. */
1193 	blob->md_ro = true;
1194 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1195 	poll_threads();
1196 	CU_ASSERT(g_bserrno == -EPERM);
1197 	blob->md_ro = false;
1198 
1199 	/* The blob started at 0 clusters. Resize it to be 5. */
1200 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1201 	poll_threads();
1202 	CU_ASSERT(g_bserrno == 0);
1203 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1204 
1205 	/* Shrink the blob to 3 clusters. This will not actually release
1206 	 * the old clusters until the blob is synced.
1207 	 */
1208 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
1209 	poll_threads();
1210 	CU_ASSERT(g_bserrno == 0);
1211 	/* Verify there are still 5 clusters in use */
1212 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1213 
1214 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1215 	poll_threads();
1216 	CU_ASSERT(g_bserrno == 0);
1217 	/* Now there are only 3 clusters in use */
1218 	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
1219 
1220 	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
1221 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1222 	poll_threads();
1223 	CU_ASSERT(g_bserrno == 0);
1224 	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
1225 
1226 	/* Try to resize the blob to size larger than blobstore. */
1227 	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
1228 	poll_threads();
1229 	CU_ASSERT(g_bserrno == -ENOSPC);
1230 
1231 	ut_blob_close_and_delete(bs, blob);
1232 }
1233 
1234 static void
1235 blob_read_only(void)
1236 {
1237 	struct spdk_blob_store *bs;
1238 	struct spdk_bs_dev *dev;
1239 	struct spdk_blob *blob;
1240 	struct spdk_bs_opts opts;
1241 	spdk_blob_id blobid;
1242 	int rc;
1243 
1244 	dev = init_dev();
1245 	spdk_bs_opts_init(&opts, sizeof(opts));
1246 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
1247 
1248 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
1249 	poll_threads();
1250 	CU_ASSERT(g_bserrno == 0);
1251 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
1252 	bs = g_bs;
1253 
1254 	blob = ut_blob_create_and_open(bs, NULL);
1255 	blobid = spdk_blob_get_id(blob);
1256 
1257 	rc = spdk_blob_set_read_only(blob);
1258 	CU_ASSERT(rc == 0);
1259 
1260 	CU_ASSERT(blob->data_ro == false);
1261 	CU_ASSERT(blob->md_ro == false);
1262 
1263 	spdk_blob_sync_md(blob, bs_op_complete, NULL);
1264 	poll_threads();
1265 
1266 	CU_ASSERT(blob->data_ro == true);
1267 	CU_ASSERT(blob->md_ro == true);
1268 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1269 
1270 	spdk_blob_close(blob, blob_op_complete, NULL);
1271 	poll_threads();
1272 	CU_ASSERT(g_bserrno == 0);
1273 
1274 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1275 	poll_threads();
1276 	CU_ASSERT(g_bserrno == 0);
1277 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1278 	blob = g_blob;
1279 
1280 	CU_ASSERT(blob->data_ro == true);
1281 	CU_ASSERT(blob->md_ro == true);
1282 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1283 
1284 	spdk_blob_close(blob, blob_op_complete, NULL);
1285 	poll_threads();
1286 	CU_ASSERT(g_bserrno == 0);
1287 
1288 	ut_bs_reload(&bs, &opts);
1289 
1290 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1291 	poll_threads();
1292 	CU_ASSERT(g_bserrno == 0);
1293 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1294 	blob = g_blob;
1295 
1296 	CU_ASSERT(blob->data_ro == true);
1297 	CU_ASSERT(blob->md_ro == true);
1298 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1299 
1300 	ut_blob_close_and_delete(bs, blob);
1301 
1302 	spdk_bs_unload(bs, bs_op_complete, NULL);
1303 	poll_threads();
1304 	CU_ASSERT(g_bserrno == 0);
1305 }
1306 
1307 static void
1308 channel_ops(void)
1309 {
1310 	struct spdk_blob_store *bs = g_bs;
1311 	struct spdk_io_channel *channel;
1312 
1313 	channel = spdk_bs_alloc_io_channel(bs);
1314 	CU_ASSERT(channel != NULL);
1315 
1316 	spdk_bs_free_io_channel(channel);
1317 	poll_threads();
1318 }
1319 
1320 static void
1321 blob_write(void)
1322 {
1323 	struct spdk_blob_store *bs = g_bs;
1324 	struct spdk_blob *blob = g_blob;
1325 	struct spdk_io_channel *channel;
1326 	uint64_t pages_per_cluster;
1327 	uint8_t payload[10 * 4096];
1328 
1329 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1330 
1331 	channel = spdk_bs_alloc_io_channel(bs);
1332 	CU_ASSERT(channel != NULL);
1333 
1334 	/* Write to a blob with 0 size */
1335 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1336 	poll_threads();
1337 	CU_ASSERT(g_bserrno == -EINVAL);
1338 
1339 	/* Resize the blob */
1340 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1341 	poll_threads();
1342 	CU_ASSERT(g_bserrno == 0);
1343 
1344 	/* Confirm that write fails if blob is marked read-only. */
1345 	blob->data_ro = true;
1346 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1347 	poll_threads();
1348 	CU_ASSERT(g_bserrno == -EPERM);
1349 	blob->data_ro = false;
1350 
1351 	/* Write to the blob */
1352 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1353 	poll_threads();
1354 	CU_ASSERT(g_bserrno == 0);
1355 
1356 	/* Write starting beyond the end */
1357 	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1358 			   NULL);
1359 	poll_threads();
1360 	CU_ASSERT(g_bserrno == -EINVAL);
1361 
1362 	/* Write starting at a valid location but going off the end */
1363 	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1364 			   blob_op_complete, NULL);
1365 	poll_threads();
1366 	CU_ASSERT(g_bserrno == -EINVAL);
1367 
1368 	spdk_bs_free_io_channel(channel);
1369 	poll_threads();
1370 }
1371 
1372 static void
1373 blob_read(void)
1374 {
1375 	struct spdk_blob_store *bs = g_bs;
1376 	struct spdk_blob *blob = g_blob;
1377 	struct spdk_io_channel *channel;
1378 	uint64_t pages_per_cluster;
1379 	uint8_t payload[10 * 4096];
1380 
1381 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1382 
1383 	channel = spdk_bs_alloc_io_channel(bs);
1384 	CU_ASSERT(channel != NULL);
1385 
1386 	/* Read from a blob with 0 size */
1387 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1388 	poll_threads();
1389 	CU_ASSERT(g_bserrno == -EINVAL);
1390 
1391 	/* Resize the blob */
1392 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1393 	poll_threads();
1394 	CU_ASSERT(g_bserrno == 0);
1395 
1396 	/* Confirm that read passes if blob is marked read-only. */
1397 	blob->data_ro = true;
1398 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1399 	poll_threads();
1400 	CU_ASSERT(g_bserrno == 0);
1401 	blob->data_ro = false;
1402 
1403 	/* Read from the blob */
1404 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1405 	poll_threads();
1406 	CU_ASSERT(g_bserrno == 0);
1407 
1408 	/* Read starting beyond the end */
1409 	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1410 			  NULL);
1411 	poll_threads();
1412 	CU_ASSERT(g_bserrno == -EINVAL);
1413 
1414 	/* Read starting at a valid location but going off the end */
1415 	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1416 			  blob_op_complete, NULL);
1417 	poll_threads();
1418 	CU_ASSERT(g_bserrno == -EINVAL);
1419 
1420 	spdk_bs_free_io_channel(channel);
1421 	poll_threads();
1422 }
1423 
1424 static void
1425 blob_rw_verify(void)
1426 {
1427 	struct spdk_blob_store *bs = g_bs;
1428 	struct spdk_blob *blob = g_blob;
1429 	struct spdk_io_channel *channel;
1430 	uint8_t payload_read[10 * 4096];
1431 	uint8_t payload_write[10 * 4096];
1432 
1433 	channel = spdk_bs_alloc_io_channel(bs);
1434 	CU_ASSERT(channel != NULL);
1435 
1436 	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
1437 	poll_threads();
1438 	CU_ASSERT(g_bserrno == 0);
1439 
1440 	memset(payload_write, 0xE5, sizeof(payload_write));
1441 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
1442 	poll_threads();
1443 	CU_ASSERT(g_bserrno == 0);
1444 
1445 	memset(payload_read, 0x00, sizeof(payload_read));
1446 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
1447 	poll_threads();
1448 	CU_ASSERT(g_bserrno == 0);
1449 	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);
1450 
1451 	spdk_bs_free_io_channel(channel);
1452 	poll_threads();
1453 }
1454 
1455 static void
1456 blob_rw_verify_iov(void)
1457 {
1458 	struct spdk_blob_store *bs = g_bs;
1459 	struct spdk_blob *blob;
1460 	struct spdk_io_channel *channel;
1461 	uint8_t payload_read[10 * 4096];
1462 	uint8_t payload_write[10 * 4096];
1463 	struct iovec iov_read[3];
1464 	struct iovec iov_write[3];
1465 	void *buf;
1466 
1467 	channel = spdk_bs_alloc_io_channel(bs);
1468 	CU_ASSERT(channel != NULL);
1469 
1470 	blob = ut_blob_create_and_open(bs, NULL);
1471 
1472 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1473 	poll_threads();
1474 	CU_ASSERT(g_bserrno == 0);
1475 
1476 	/*
1477 	 * Manually adjust the offset of the blob's second cluster.  This allows
1478 	 *  us to make sure that the readv/write code correctly accounts for I/O
1479 	 *  that cross cluster boundaries.  Start by asserting that the allocated
1480 	 *  clusters are where we expect before modifying the second cluster.
1481 	 */
1482 	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
1483 	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
1484 	blob->active.clusters[1] = 3 * 256;
1485 
1486 	memset(payload_write, 0xE5, sizeof(payload_write));
1487 	iov_write[0].iov_base = payload_write;
1488 	iov_write[0].iov_len = 1 * 4096;
1489 	iov_write[1].iov_base = payload_write + 1 * 4096;
1490 	iov_write[1].iov_len = 5 * 4096;
1491 	iov_write[2].iov_base = payload_write + 6 * 4096;
1492 	iov_write[2].iov_len = 4 * 4096;
1493 	/*
1494 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1495 	 *  will get written to the first cluster, the last 4 to the second cluster.
1496 	 */
1497 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1498 	poll_threads();
1499 	CU_ASSERT(g_bserrno == 0);
1500 
1501 	memset(payload_read, 0xAA, sizeof(payload_read));
1502 	iov_read[0].iov_base = payload_read;
1503 	iov_read[0].iov_len = 3 * 4096;
1504 	iov_read[1].iov_base = payload_read + 3 * 4096;
1505 	iov_read[1].iov_len = 4 * 4096;
1506 	iov_read[2].iov_base = payload_read + 7 * 4096;
1507 	iov_read[2].iov_len = 3 * 4096;
1508 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
1509 	poll_threads();
1510 	CU_ASSERT(g_bserrno == 0);
1511 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
1512 
1513 	buf = calloc(1, 256 * 4096);
1514 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1515 	/* Check that cluster 2 on "disk" was not modified. */
1516 	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
1517 	free(buf);
1518 
1519 	spdk_blob_close(blob, blob_op_complete, NULL);
1520 	poll_threads();
1521 	CU_ASSERT(g_bserrno == 0);
1522 
1523 	spdk_bs_free_io_channel(channel);
1524 	poll_threads();
1525 }
1526 
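/*
 * Count the request sets currently available on the channel.  Compared before
 * and after a failed I/O to make sure the failure path returned its request.
 */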
1527 static uint32_t
1528 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1529 {
1530 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1531 	struct spdk_bs_request_set *set;
1532 	uint32_t count = 0;
1533 
1534 	TAILQ_FOREACH(set, &channel->reqs, link) {
1535 		count++;
1536 	}
1537 
1538 	return count;
1539 }
1540 
1541 static void
1542 blob_rw_verify_iov_nomem(void)
1543 {
1544 	struct spdk_blob_store *bs = g_bs;
1545 	struct spdk_blob *blob = g_blob;
1546 	struct spdk_io_channel *channel;
1547 	uint8_t payload_write[10 * 4096];
1548 	struct iovec iov_write[3];
1549 	uint32_t req_count;
1550 
1551 	channel = spdk_bs_alloc_io_channel(bs);
1552 	CU_ASSERT(channel != NULL);
1553 
1554 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1555 	poll_threads();
1556 	CU_ASSERT(g_bserrno == 0);
1557 
1558 	/*
1559 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1560 	 *  will get written to the first cluster, the last 4 to the second cluster.
1561 	 */
1562 	iov_write[0].iov_base = payload_write;
1563 	iov_write[0].iov_len = 1 * 4096;
1564 	iov_write[1].iov_base = payload_write + 1 * 4096;
1565 	iov_write[1].iov_len = 5 * 4096;
1566 	iov_write[2].iov_base = payload_write + 6 * 4096;
1567 	iov_write[2].iov_len = 4 * 4096;
1568 	MOCK_SET(calloc, NULL);
1569 	req_count = bs_channel_get_req_count(channel);
1570 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1571 	poll_threads();
1572 	CU_ASSERT(g_bserrno == -ENOMEM);
1573 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1574 	MOCK_CLEAR(calloc);
1575 
1576 	spdk_bs_free_io_channel(channel);
1577 	poll_threads();
1578 }
1579 
1580 static void
1581 blob_rw_iov_read_only(void)
1582 {
1583 	struct spdk_blob_store *bs = g_bs;
1584 	struct spdk_blob *blob = g_blob;
1585 	struct spdk_io_channel *channel;
1586 	uint8_t payload_read[4096];
1587 	uint8_t payload_write[4096];
1588 	struct iovec iov_read;
1589 	struct iovec iov_write;
1590 
1591 	channel = spdk_bs_alloc_io_channel(bs);
1592 	CU_ASSERT(channel != NULL);
1593 
1594 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1595 	poll_threads();
1596 	CU_ASSERT(g_bserrno == 0);
1597 
1598 	/* Verify that writev fails if the read_only flag is set. */
1599 	blob->data_ro = true;
1600 	iov_write.iov_base = payload_write;
1601 	iov_write.iov_len = sizeof(payload_write);
1602 	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
1603 	poll_threads();
1604 	CU_ASSERT(g_bserrno == -EPERM);
1605 
1606 	/* Verify that reads pass if data_ro flag is set. */
1607 	iov_read.iov_base = payload_read;
1608 	iov_read.iov_len = sizeof(payload_read);
1609 	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
1610 	poll_threads();
1611 	CU_ASSERT(g_bserrno == 0);
1612 
1613 	spdk_bs_free_io_channel(channel);
1614 	poll_threads();
1615 }
1616 
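/*
 * Reference I/O helpers: issue the payload one page at a time so that no
 * single request spans a cluster boundary and the blobstore never has to
 * split it.  blob_operation_split_rw() pairs these with the normal
 * (potentially split) path and checks that both produce the same data.
 */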
1617 static void
1618 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1619 		       uint8_t *payload, uint64_t offset, uint64_t length,
1620 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1621 {
1622 	uint64_t i;
1623 	uint8_t *buf;
1624 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1625 
1626 	/* To be sure that the operation is NOT split, read one page at a time */
1627 	buf = payload;
1628 	for (i = 0; i < length; i++) {
1629 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1630 		poll_threads();
1631 		if (g_bserrno != 0) {
1632 			/* Pass the error code up */
1633 			break;
1634 		}
1635 		buf += page_size;
1636 	}
1637 
1638 	cb_fn(cb_arg, g_bserrno);
1639 }
1640 
1641 static void
1642 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1643 			uint8_t *payload, uint64_t offset, uint64_t length,
1644 			spdk_blob_op_complete cb_fn, void *cb_arg)
1645 {
1646 	uint64_t i;
1647 	uint8_t *buf;
1648 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1649 
1650 	/* To be sure that the operation is NOT split, write one page at a time */
1651 	buf = payload;
1652 	for (i = 0; i < length; i++) {
1653 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1654 		poll_threads();
1655 		if (g_bserrno != 0) {
1656 			/* Pass the error code up */
1657 			break;
1658 		}
1659 		buf += page_size;
1660 	}
1661 
1662 	cb_fn(cb_arg, g_bserrno);
1663 }
1664 
1665 static void
1666 blob_operation_split_rw(void)
1667 {
1668 	struct spdk_blob_store *bs = g_bs;
1669 	struct spdk_blob *blob;
1670 	struct spdk_io_channel *channel;
1671 	struct spdk_blob_opts opts;
1672 	uint64_t cluster_size;
1673 
1674 	uint64_t payload_size;
1675 	uint8_t *payload_read;
1676 	uint8_t *payload_write;
1677 	uint8_t *payload_pattern;
1678 
1679 	uint64_t page_size;
1680 	uint64_t pages_per_cluster;
1681 	uint64_t pages_per_payload;
1682 
1683 	uint64_t i;
1684 
1685 	cluster_size = spdk_bs_get_cluster_size(bs);
1686 	page_size = spdk_bs_get_page_size(bs);
1687 	pages_per_cluster = cluster_size / page_size;
1688 	pages_per_payload = pages_per_cluster * 5;
1689 	payload_size = cluster_size * 5;
1690 
1691 	payload_read = malloc(payload_size);
1692 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1693 
1694 	payload_write = malloc(payload_size);
1695 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1696 
1697 	payload_pattern = malloc(payload_size);
1698 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1699 
1700 	/* Prepare random pattern to write */
1701 	memset(payload_pattern, 0xFF, payload_size);
1702 	for (i = 0; i < pages_per_payload; i++) {
1703 		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
1704 	}
1705 
1706 	channel = spdk_bs_alloc_io_channel(bs);
1707 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1708 
1709 	/* Create blob */
1710 	ut_spdk_blob_opts_init(&opts);
1711 	opts.thin_provision = false;
1712 	opts.num_clusters = 5;
1713 
1714 	blob = ut_blob_create_and_open(bs, &opts);
1715 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1716 
1717 	/* Initial read should return zeroed payload */
1718 	memset(payload_read, 0xFF, payload_size);
1719 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1720 	poll_threads();
1721 	CU_ASSERT(g_bserrno == 0);
1722 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1723 
1724 	/* Fill whole blob except last page */
1725 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
1726 			   blob_op_complete, NULL);
1727 	poll_threads();
1728 	CU_ASSERT(g_bserrno == 0);
1729 
1730 	/* Write last page with a pattern */
1731 	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
1732 			   blob_op_complete, NULL);
1733 	poll_threads();
1734 	CU_ASSERT(g_bserrno == 0);
1735 
1736 	/* Read whole blob and check consistency */
1737 	memset(payload_read, 0xFF, payload_size);
1738 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1739 	poll_threads();
1740 	CU_ASSERT(g_bserrno == 0);
1741 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1742 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1743 
1744 	/* Fill whole blob except first page */
1745 	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
1746 			   blob_op_complete, NULL);
1747 	poll_threads();
1748 	CU_ASSERT(g_bserrno == 0);
1749 
1750 	/* Write first page with a pattern */
1751 	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
1752 			   blob_op_complete, NULL);
1753 	poll_threads();
1754 	CU_ASSERT(g_bserrno == 0);
1755 
1756 	/* Read whole blob and check consistency */
1757 	memset(payload_read, 0xFF, payload_size);
1758 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1759 	poll_threads();
1760 	CU_ASSERT(g_bserrno == 0);
1761 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1762 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1763 
1764 
1765 	/* Fill whole blob with a pattern (5 clusters) */
1766 
1767 	/* 1. Read test: fill the blob with unsplit page writes, then verify one large read that must be split. */
1768 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1769 				blob_op_complete, NULL);
1770 	poll_threads();
1771 	CU_ASSERT(g_bserrno == 0);
1772 
1773 	memset(payload_read, 0xFF, payload_size);
1774 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1775 	poll_threads();
1776 	poll_threads();
1777 	CU_ASSERT(g_bserrno == 0);
1778 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1779 
1780 	/* 2. Write test: fill the blob with one large write that must be split, then verify with unsplit page reads. */
1781 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
1782 			   blob_op_complete, NULL);
1783 	poll_threads();
1784 	CU_ASSERT(g_bserrno == 0);
1785 
1786 	memset(payload_read, 0xFF, payload_size);
1787 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1788 	poll_threads();
1789 	CU_ASSERT(g_bserrno == 0);
1790 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1791 
1792 	spdk_bs_free_io_channel(channel);
1793 	poll_threads();
1794 
1795 	g_blob = NULL;
1796 	g_blobid = 0;
1797 
1798 	free(payload_read);
1799 	free(payload_write);
1800 	free(payload_pattern);
1801 
1802 	ut_blob_close_and_delete(bs, blob);
1803 }
1804 
1805 static void
1806 blob_operation_split_rw_iov(void)
1807 {
1808 	struct spdk_blob_store *bs = g_bs;
1809 	struct spdk_blob *blob;
1810 	struct spdk_io_channel *channel;
1811 	struct spdk_blob_opts opts;
1812 	uint64_t cluster_size;
1813 
1814 	uint64_t payload_size;
1815 	uint8_t *payload_read;
1816 	uint8_t *payload_write;
1817 	uint8_t *payload_pattern;
1818 
1819 	uint64_t page_size;
1820 	uint64_t pages_per_cluster;
1821 	uint64_t pages_per_payload;
1822 
1823 	struct iovec iov_read[2];
1824 	struct iovec iov_write[2];
1825 
1826 	uint64_t i, j;
1827 
1828 	cluster_size = spdk_bs_get_cluster_size(bs);
1829 	page_size = spdk_bs_get_page_size(bs);
1830 	pages_per_cluster = cluster_size / page_size;
1831 	pages_per_payload = pages_per_cluster * 5;
1832 	payload_size = cluster_size * 5;
1833 
1834 	payload_read = malloc(payload_size);
1835 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1836 
1837 	payload_write = malloc(payload_size);
1838 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1839 
1840 	payload_pattern = malloc(payload_size);
1841 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1842 
1843 	/* Prepare a pattern to write: fill every 8-byte word of page i with i + 1 */
1844 	for (i = 0; i < pages_per_payload; i++) {
1845 		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
1846 			uint64_t *tmp;
1847 
1848 			tmp = (uint64_t *)payload_pattern;
1849 			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
1850 			*tmp = i + 1;
1851 		}
1852 	}
1853 
1854 	channel = spdk_bs_alloc_io_channel(bs);
1855 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1856 
1857 	/* Create blob */
1858 	ut_spdk_blob_opts_init(&opts);
1859 	opts.thin_provision = false;
1860 	opts.num_clusters = 5;
1861 
1862 	blob = ut_blob_create_and_open(bs, &opts);
1863 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1864 
1865 	/* Initial read should return zeroed payload */
1866 	memset(payload_read, 0xFF, payload_size);
1867 	iov_read[0].iov_base = payload_read;
1868 	iov_read[0].iov_len = cluster_size * 3;
1869 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1870 	iov_read[1].iov_len = cluster_size * 2;
1871 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1872 	poll_threads();
1873 	CU_ASSERT(g_bserrno == 0);
1874 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1875 
1876 	/* The first iov fills the whole blob except the last page and the second iov writes
1877 	 *  the last page with a pattern. */
1878 	iov_write[0].iov_base = payload_pattern;
1879 	iov_write[0].iov_len = payload_size - page_size;
1880 	iov_write[1].iov_base = payload_pattern;
1881 	iov_write[1].iov_len = page_size;
1882 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1883 	poll_threads();
1884 	CU_ASSERT(g_bserrno == 0);
1885 
1886 	/* Read whole blob and check consistency */
1887 	memset(payload_read, 0xFF, payload_size);
1888 	iov_read[0].iov_base = payload_read;
1889 	iov_read[0].iov_len = cluster_size * 2;
1890 	iov_read[1].iov_base = payload_read + cluster_size * 2;
1891 	iov_read[1].iov_len = cluster_size * 3;
1892 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1893 	poll_threads();
1894 	CU_ASSERT(g_bserrno == 0);
1895 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1896 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1897 
1898 	/* The first iov writes only the first page and the second iov fills the whole blob except
1899 	 *  the first page with a pattern. */
1900 	iov_write[0].iov_base = payload_pattern;
1901 	iov_write[0].iov_len = page_size;
1902 	iov_write[1].iov_base = payload_pattern;
1903 	iov_write[1].iov_len = payload_size - page_size;
1904 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1905 	poll_threads();
1906 	CU_ASSERT(g_bserrno == 0);
1907 
1908 	/* Read whole blob and check consistency */
1909 	memset(payload_read, 0xFF, payload_size);
1910 	iov_read[0].iov_base = payload_read;
1911 	iov_read[0].iov_len = cluster_size * 4;
1912 	iov_read[1].iov_base = payload_read + cluster_size * 4;
1913 	iov_read[1].iov_len = cluster_size;
1914 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1915 	poll_threads();
1916 	CU_ASSERT(g_bserrno == 0);
1917 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1918 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1919 
1920 
1921 	/* Fill whole blob with a pattern (5 clusters) */
1922 
1923 	/* 1. Read test: fill the blob with unsplit page writes, then verify one large readv that must be split. */
1924 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1925 				blob_op_complete, NULL);
1926 	poll_threads();
1927 	CU_ASSERT(g_bserrno == 0);
1928 
1929 	memset(payload_read, 0xFF, payload_size);
1930 	iov_read[0].iov_base = payload_read;
1931 	iov_read[0].iov_len = cluster_size;
1932 	iov_read[1].iov_base = payload_read + cluster_size;
1933 	iov_read[1].iov_len = cluster_size * 4;
1934 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1935 	poll_threads();
1936 	CU_ASSERT(g_bserrno == 0);
1937 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1938 
1939 	/* 2. Write test: fill the blob with one large writev that must be split, then verify with unsplit page reads. */
1940 	iov_write[0].iov_base = payload_read;
1941 	iov_write[0].iov_len = cluster_size * 2;
1942 	iov_write[1].iov_base = payload_read + cluster_size * 2;
1943 	iov_write[1].iov_len = cluster_size * 3;
1944 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1945 	poll_threads();
1946 	CU_ASSERT(g_bserrno == 0);
1947 
1948 	memset(payload_read, 0xFF, payload_size);
1949 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1950 	poll_threads();
1951 	CU_ASSERT(g_bserrno == 0);
1952 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1953 
1954 	spdk_bs_free_io_channel(channel);
1955 	poll_threads();
1956 
1957 	g_blob = NULL;
1958 	g_blobid = 0;
1959 
1960 	free(payload_read);
1961 	free(payload_write);
1962 	free(payload_pattern);
1963 
1964 	ut_blob_close_and_delete(bs, blob);
1965 }
1966 
1967 static void
1968 blob_unmap(void)
1969 {
1970 	struct spdk_blob_store *bs = g_bs;
1971 	struct spdk_blob *blob;
1972 	struct spdk_io_channel *channel;
1973 	struct spdk_blob_opts opts;
1974 	uint8_t payload[4096];
1975 	int i;
1976 
1977 	channel = spdk_bs_alloc_io_channel(bs);
1978 	CU_ASSERT(channel != NULL);
1979 
1980 	ut_spdk_blob_opts_init(&opts);
1981 	opts.num_clusters = 10;
1982 
1983 	blob = ut_blob_create_and_open(bs, &opts);
1984 
1985 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1986 	poll_threads();
1987 	CU_ASSERT(g_bserrno == 0);
1988 
1989 	memset(payload, 0, sizeof(payload));
1990 	payload[0] = 0xFF;
1991 
1992 	/*
1993 	 * Set first byte of every cluster to 0xFF.
1994 	 * First cluster on device is reserved so let's start from cluster number 1
1995 	 */
1996 	for (i = 1; i < 11; i++) {
1997 		g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
1998 	}
1999 
2000 	/* Confirm the direct device writes are visible through blob reads */
2001 	for (i = 0; i < 10; i++) {
2002 		payload[0] = 0;
2003 		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
2004 				  blob_op_complete, NULL);
2005 		poll_threads();
2006 		CU_ASSERT(g_bserrno == 0);
2007 		CU_ASSERT(payload[0] == 0xFF);
2008 	}
2009 
2010 	/* Mark some clusters as unallocated */
2011 	blob->active.clusters[1] = 0;
2012 	blob->active.clusters[2] = 0;
2013 	blob->active.clusters[3] = 0;
2014 	blob->active.clusters[6] = 0;
2015 	blob->active.clusters[8] = 0;
2016 
2017 	/* Unmap clusters by resizing to 0 */
2018 	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
2019 	poll_threads();
2020 	CU_ASSERT(g_bserrno == 0);
2021 
2022 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2023 	poll_threads();
2024 	CU_ASSERT(g_bserrno == 0);
2025 
2026 	/* Confirm that only 'allocated' clusters were unmapped */
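	/* Blob cluster i was allocated to device cluster i + 1, so the entries zeroed above
	 * (1, 2, 3, 6, 8) correspond to device clusters 2, 3, 4, 7 and 9. Those must keep their
	 * 0xFF marker, while the clusters still recorded as allocated are released by the resize
	 * and cleared by the UT dev (unmap is emulated with a memset to zero). */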
2027 	for (i = 1; i < 11; i++) {
2028 		switch (i) {
2029 		case 2:
2030 		case 3:
2031 		case 4:
2032 		case 7:
2033 		case 9:
2034 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
2035 			break;
2036 		default:
2037 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
2038 			break;
2039 		}
2040 	}
2041 
2042 	spdk_bs_free_io_channel(channel);
2043 	poll_threads();
2044 
2045 	ut_blob_close_and_delete(bs, blob);
2046 }
2047 
2048 static void
2049 blob_iter(void)
2050 {
2051 	struct spdk_blob_store *bs = g_bs;
2052 	struct spdk_blob *blob;
2053 	spdk_blob_id blobid;
2054 	struct spdk_blob_opts blob_opts;
2055 
2056 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2057 	poll_threads();
2058 	CU_ASSERT(g_blob == NULL);
2059 	CU_ASSERT(g_bserrno == -ENOENT);
2060 
2061 	ut_spdk_blob_opts_init(&blob_opts);
2062 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2063 	poll_threads();
2064 	CU_ASSERT(g_bserrno == 0);
2065 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2066 	blobid = g_blobid;
2067 
2068 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2069 	poll_threads();
2070 	CU_ASSERT(g_blob != NULL);
2071 	CU_ASSERT(g_bserrno == 0);
2072 	blob = g_blob;
2073 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
2074 
2075 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2076 	poll_threads();
2077 	CU_ASSERT(g_blob == NULL);
2078 	CU_ASSERT(g_bserrno == -ENOENT);
2079 }
2080 
2081 static void
2082 blob_xattr(void)
2083 {
2084 	struct spdk_blob_store *bs = g_bs;
2085 	struct spdk_blob *blob = g_blob;
2086 	spdk_blob_id blobid = spdk_blob_get_id(blob);
2087 	uint64_t length;
2088 	int rc;
2089 	const char *name1, *name2;
2090 	const void *value;
2091 	size_t value_len;
2092 	struct spdk_xattr_names *names;
2093 
2094 	/* Test that set_xattr fails if md_ro flag is set. */
2095 	blob->md_ro = true;
2096 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2097 	CU_ASSERT(rc == -EPERM);
2098 
2099 	blob->md_ro = false;
2100 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2101 	CU_ASSERT(rc == 0);
2102 
2103 	length = 2345;
2104 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2105 	CU_ASSERT(rc == 0);
2106 
2107 	/* Overwrite "length" xattr. */
2108 	length = 3456;
2109 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2110 	CU_ASSERT(rc == 0);
2111 
2112 	/* get_xattr should still work even if md_ro flag is set. */
2113 	value = NULL;
2114 	blob->md_ro = true;
2115 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2116 	CU_ASSERT(rc == 0);
2117 	SPDK_CU_ASSERT_FATAL(value != NULL);
2118 	CU_ASSERT(*(uint64_t *)value == length);
2119 	CU_ASSERT(value_len == 8);
2120 	blob->md_ro = false;
2121 
2122 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2123 	CU_ASSERT(rc == -ENOENT);
2124 
2125 	names = NULL;
2126 	rc = spdk_blob_get_xattr_names(blob, &names);
2127 	CU_ASSERT(rc == 0);
2128 	SPDK_CU_ASSERT_FATAL(names != NULL);
2129 	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
2130 	name1 = spdk_xattr_names_get_name(names, 0);
2131 	SPDK_CU_ASSERT_FATAL(name1 != NULL);
2132 	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
2133 	name2 = spdk_xattr_names_get_name(names, 1);
2134 	SPDK_CU_ASSERT_FATAL(name2 != NULL);
2135 	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
2136 	CU_ASSERT(strcmp(name1, name2));
2137 	spdk_xattr_names_free(names);
2138 
2139 	/* Confirm that remove_xattr fails if md_ro is set to true. */
2140 	blob->md_ro = true;
2141 	rc = spdk_blob_remove_xattr(blob, "name");
2142 	CU_ASSERT(rc == -EPERM);
2143 
2144 	blob->md_ro = false;
2145 	rc = spdk_blob_remove_xattr(blob, "name");
2146 	CU_ASSERT(rc == 0);
2147 
2148 	rc = spdk_blob_remove_xattr(blob, "foobar");
2149 	CU_ASSERT(rc == -ENOENT);
2150 
2151 	/* Set internal xattr */
2152 	length = 7898;
2153 	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
2154 	CU_ASSERT(rc == 0);
2155 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2156 	CU_ASSERT(rc == 0);
2157 	CU_ASSERT(*(uint64_t *)value == length);
2158 	/* Try to get a public xattr with the same name */
2159 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2160 	CU_ASSERT(rc != 0);
2161 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
2162 	CU_ASSERT(rc != 0);
2163 	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
2164 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
2165 		  SPDK_BLOB_INTERNAL_XATTR);
2166 
2167 	spdk_blob_close(blob, blob_op_complete, NULL);
2168 	poll_threads();
2169 
2170 	/* Check if xattrs are persisted */
2171 	ut_bs_reload(&bs, NULL);
2172 
2173 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2174 	poll_threads();
2175 	CU_ASSERT(g_bserrno == 0);
2176 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2177 	blob = g_blob;
2178 
2179 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2180 	CU_ASSERT(rc == 0);
2181 	CU_ASSERT(*(uint64_t *)value == length);
2182 
2183 	/* Try to get the internal xattr through the public call */
2184 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2185 	CU_ASSERT(rc != 0);
2186 
2187 	rc = blob_remove_xattr(blob, "internal", true);
2188 	CU_ASSERT(rc == 0);
2189 
2190 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
2191 }
2192 
2193 static void
2194 blob_parse_md(void)
2195 {
2196 	struct spdk_blob_store *bs = g_bs;
2197 	struct spdk_blob *blob;
2198 	int rc;
2199 	uint32_t used_pages;
2200 	size_t xattr_length;
2201 	char *xattr;
2202 
2203 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2204 	blob = ut_blob_create_and_open(bs, NULL);
2205 
2206 	/* Set a large xattr to force more than 1 page of metadata. */
2207 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2208 		       strlen("large_xattr");
2209 	xattr = calloc(xattr_length, sizeof(char));
2210 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2211 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2212 	free(xattr);
2213 	SPDK_CU_ASSERT_FATAL(rc == 0);
2214 
2215 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2216 	poll_threads();
2217 
2218 	/* Delete the blob and verify that the used md page count returns to its value from before the blob's creation. */
2219 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2220 	ut_blob_close_and_delete(bs, blob);
2221 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2222 }
2223 
2224 static void
2225 bs_load(void)
2226 {
2227 	struct spdk_blob_store *bs;
2228 	struct spdk_bs_dev *dev;
2229 	spdk_blob_id blobid;
2230 	struct spdk_blob *blob;
2231 	struct spdk_bs_super_block *super_block;
2232 	uint64_t length;
2233 	int rc;
2234 	const void *value;
2235 	size_t value_len;
2236 	struct spdk_bs_opts opts;
2237 	struct spdk_blob_opts blob_opts;
2238 
2239 	dev = init_dev();
2240 	spdk_bs_opts_init(&opts, sizeof(opts));
2241 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2242 
2243 	/* Initialize a new blob store */
2244 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2245 	poll_threads();
2246 	CU_ASSERT(g_bserrno == 0);
2247 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2248 	bs = g_bs;
2249 
2250 	/* Try to open a blobid that does not exist */
2251 	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
2252 	poll_threads();
2253 	CU_ASSERT(g_bserrno == -ENOENT);
2254 	CU_ASSERT(g_blob == NULL);
2255 
2256 	/* Create a blob */
2257 	blob = ut_blob_create_and_open(bs, NULL);
2258 	blobid = spdk_blob_get_id(blob);
2259 
2260 	/* Try again to open valid blob but without the upper bit set */
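	/* Blob IDs combine the md page index (low 32 bits) with a flag bit above them, so an ID
	 * with the upper bits masked off no longer matches any existing blob and the open fails. */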
2261 	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
2262 	poll_threads();
2263 	CU_ASSERT(g_bserrno == -ENOENT);
2264 	CU_ASSERT(g_blob == NULL);
2265 
2266 	/* Set some xattrs */
2267 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2268 	CU_ASSERT(rc == 0);
2269 
2270 	length = 2345;
2271 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2272 	CU_ASSERT(rc == 0);
2273 
2274 	/* Resize the blob */
2275 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2276 	poll_threads();
2277 	CU_ASSERT(g_bserrno == 0);
2278 
2279 	spdk_blob_close(blob, blob_op_complete, NULL);
2280 	poll_threads();
2281 	CU_ASSERT(g_bserrno == 0);
2282 	blob = NULL;
2283 	g_blob = NULL;
2284 	g_blobid = SPDK_BLOBID_INVALID;
2285 
2286 	/* Unload the blob store */
2287 	spdk_bs_unload(bs, bs_op_complete, NULL);
2288 	poll_threads();
2289 	CU_ASSERT(g_bserrno == 0);
2290 	g_bs = NULL;
2291 	g_blob = NULL;
2292 	g_blobid = 0;
2293 
2294 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2295 	CU_ASSERT(super_block->clean == 1);
2296 
2297 	/* Load should fail for device with an unsupported blocklen */
2298 	dev = init_dev();
2299 	dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
2300 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2301 	poll_threads();
2302 	CU_ASSERT(g_bserrno == -EINVAL);
2303 
2304 	/* Load should fail when max_md_ops is set to zero */
2305 	dev = init_dev();
2306 	spdk_bs_opts_init(&opts, sizeof(opts));
2307 	opts.max_md_ops = 0;
2308 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2309 	poll_threads();
2310 	CU_ASSERT(g_bserrno == -EINVAL);
2311 
2312 	/* Load should fail when max_channel_ops is set to zero */
2313 	dev = init_dev();
2314 	spdk_bs_opts_init(&opts, sizeof(opts));
2315 	opts.max_channel_ops = 0;
2316 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2317 	poll_threads();
2318 	CU_ASSERT(g_bserrno == -EINVAL);
2319 
2320 	/* Load an existing blob store */
2321 	dev = init_dev();
2322 	spdk_bs_opts_init(&opts, sizeof(opts));
2323 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2324 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2325 	poll_threads();
2326 	CU_ASSERT(g_bserrno == 0);
2327 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2328 	bs = g_bs;
2329 
2330 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2331 	CU_ASSERT(super_block->clean == 1);
2332 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2333 
2334 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2335 	poll_threads();
2336 	CU_ASSERT(g_bserrno == 0);
2337 	CU_ASSERT(g_blob != NULL);
2338 	blob = g_blob;
2339 
2340 	/* Verify that blobstore is marked dirty after first metadata sync */
2341 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2342 	CU_ASSERT(super_block->clean == 1);
2343 
2344 	/* Get the xattrs */
2345 	value = NULL;
2346 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2347 	CU_ASSERT(rc == 0);
2348 	SPDK_CU_ASSERT_FATAL(value != NULL);
2349 	CU_ASSERT(*(uint64_t *)value == length);
2350 	CU_ASSERT(value_len == 8);
2351 
2352 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2353 	CU_ASSERT(rc == -ENOENT);
2354 
2355 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
2356 
2357 	spdk_blob_close(blob, blob_op_complete, NULL);
2358 	poll_threads();
2359 	CU_ASSERT(g_bserrno == 0);
2360 	blob = NULL;
2361 	g_blob = NULL;
2362 
2363 	spdk_bs_unload(bs, bs_op_complete, NULL);
2364 	poll_threads();
2365 	CU_ASSERT(g_bserrno == 0);
2366 	g_bs = NULL;
2367 
2368 	/* Load should fail: bdev size < saved size */
2369 	dev = init_dev();
2370 	dev->blockcnt /= 2;
2371 
2372 	spdk_bs_opts_init(&opts, sizeof(opts));
2373 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2374 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2375 	poll_threads();
2376 
2377 	CU_ASSERT(g_bserrno == -EILSEQ);
2378 
2379 	/* Load should succeed: bdev size > saved size */
2380 	dev = init_dev();
2381 	dev->blockcnt *= 4;
2382 
2383 	spdk_bs_opts_init(&opts, sizeof(opts));
2384 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2385 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2386 	poll_threads();
2387 	CU_ASSERT(g_bserrno == 0);
2388 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2389 	bs = g_bs;
2390 
2391 	CU_ASSERT(g_bserrno == 0);
2392 	spdk_bs_unload(bs, bs_op_complete, NULL);
2393 	poll_threads();
2394 
2395 
2396 	/* Test compatibility mode: older super blocks recorded size == 0, so load must derive the size from the device */
2397 
2398 	dev = init_dev();
2399 	super_block->size = 0;
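	/* Recompute the CRC so the doctored super block still passes validation on load */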
2400 	super_block->crc = blob_md_page_calc_crc(super_block);
2401 
2402 	spdk_bs_opts_init(&opts, sizeof(opts));
2403 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2404 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2405 	poll_threads();
2406 	CU_ASSERT(g_bserrno == 0);
2407 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2408 	bs = g_bs;
2409 
2410 	/* Create a blob */
2411 	ut_spdk_blob_opts_init(&blob_opts);
2412 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2413 	poll_threads();
2414 	CU_ASSERT(g_bserrno == 0);
2415 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2416 
2417 	/* Blobstore should update number of blocks in super_block */
2418 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2419 	CU_ASSERT(super_block->clean == 0);
2420 
2421 	spdk_bs_unload(bs, bs_op_complete, NULL);
2422 	poll_threads();
2423 	CU_ASSERT(g_bserrno == 0);
2424 	CU_ASSERT(super_block->clean == 1);
2425 	g_bs = NULL;
2426 
2427 }
2428 
2429 static void
2430 bs_load_pending_removal(void)
2431 {
2432 	struct spdk_blob_store *bs = g_bs;
2433 	struct spdk_blob_opts opts;
2434 	struct spdk_blob *blob, *snapshot;
2435 	spdk_blob_id blobid, snapshotid;
2436 	const void *value;
2437 	size_t value_len;
2438 	int rc;
2439 
2440 	/* Create blob */
2441 	ut_spdk_blob_opts_init(&opts);
2442 	opts.num_clusters = 10;
2443 
2444 	blob = ut_blob_create_and_open(bs, &opts);
2445 	blobid = spdk_blob_get_id(blob);
2446 
2447 	/* Create snapshot */
2448 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2449 	poll_threads();
2450 	CU_ASSERT(g_bserrno == 0);
2451 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2452 	snapshotid = g_blobid;
2453 
2454 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2455 	poll_threads();
2456 	CU_ASSERT(g_bserrno == 0);
2457 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2458 	snapshot = g_blob;
2459 
2460 	/* Set SNAPSHOT_PENDING_REMOVAL xattr */
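	/* The snapshot is read-only; temporarily clear md_ro so the internal xattr can be set */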
2461 	snapshot->md_ro = false;
2462 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2463 	CU_ASSERT(rc == 0);
2464 	snapshot->md_ro = true;
2465 
2466 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2467 	poll_threads();
2468 	CU_ASSERT(g_bserrno == 0);
2469 
2470 	spdk_blob_close(blob, blob_op_complete, NULL);
2471 	poll_threads();
2472 	CU_ASSERT(g_bserrno == 0);
2473 
2474 	/* Reload blobstore */
2475 	ut_bs_reload(&bs, NULL);
2476 
2477 	/* Snapshot should not be removed as blob is still pointing to it */
2478 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2479 	poll_threads();
2480 	CU_ASSERT(g_bserrno == 0);
2481 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2482 	snapshot = g_blob;
2483 
2484 	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
2485 	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
2486 	CU_ASSERT(rc != 0);
2487 
2488 	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
2489 	snapshot->md_ro = false;
2490 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2491 	CU_ASSERT(rc == 0);
2492 	snapshot->md_ro = true;
2493 
2494 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2495 	poll_threads();
2496 	CU_ASSERT(g_bserrno == 0);
2497 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2498 	blob = g_blob;
2499 
2500 	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
2501 	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
2502 
2503 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2504 	poll_threads();
2505 	CU_ASSERT(g_bserrno == 0);
2506 
2507 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2508 	poll_threads();
2509 	CU_ASSERT(g_bserrno == 0);
2510 
2511 	spdk_blob_close(blob, blob_op_complete, NULL);
2512 	poll_threads();
2513 	CU_ASSERT(g_bserrno == 0);
2514 
2515 	/* Reload blobstore */
2516 	ut_bs_reload(&bs, NULL);
2517 
2518 	/* Snapshot should be removed as blob is not pointing to it anymore */
2519 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2520 	poll_threads();
2521 	CU_ASSERT(g_bserrno != 0);
2522 }
2523 
2524 static void
2525 bs_load_custom_cluster_size(void)
2526 {
2527 	struct spdk_blob_store *bs;
2528 	struct spdk_bs_dev *dev;
2529 	struct spdk_bs_super_block *super_block;
2530 	struct spdk_bs_opts opts;
2531 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2532 	uint32_t cluster_sz;
2533 	uint64_t total_clusters;
2534 
2535 	dev = init_dev();
2536 	spdk_bs_opts_init(&opts, sizeof(opts));
2537 	opts.cluster_sz = custom_cluster_size;
2538 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2539 
2540 	/* Initialize a new blob store */
2541 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2542 	poll_threads();
2543 	CU_ASSERT(g_bserrno == 0);
2544 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2545 	bs = g_bs;
2546 	cluster_sz = bs->cluster_sz;
2547 	total_clusters = bs->total_clusters;
2548 
2549 	/* Unload the blob store */
2550 	spdk_bs_unload(bs, bs_op_complete, NULL);
2551 	poll_threads();
2552 	CU_ASSERT(g_bserrno == 0);
2553 	g_bs = NULL;
2554 	g_blob = NULL;
2555 	g_blobid = 0;
2556 
2557 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2558 	CU_ASSERT(super_block->clean == 1);
2559 
2560 	/* Load an existing blob store */
2561 	dev = init_dev();
2562 	spdk_bs_opts_init(&opts, sizeof(opts));
2563 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2564 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2565 	poll_threads();
2566 	CU_ASSERT(g_bserrno == 0);
2567 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2568 	bs = g_bs;
2569 	/* Compare cluster size and cluster count with the values recorded after initialization */
2570 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2571 	CU_ASSERT(total_clusters == bs->total_clusters);
2572 
2573 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2574 	CU_ASSERT(super_block->clean == 1);
2575 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2576 
2577 	spdk_bs_unload(bs, bs_op_complete, NULL);
2578 	poll_threads();
2579 	CU_ASSERT(g_bserrno == 0);
2580 	CU_ASSERT(super_block->clean == 1);
2581 	g_bs = NULL;
2582 }
2583 
2584 static void
2585 bs_load_after_failed_grow(void)
2586 {
2587 	struct spdk_blob_store *bs;
2588 	struct spdk_bs_dev *dev;
2589 	struct spdk_bs_super_block *super_block;
2590 	struct spdk_bs_opts opts;
2591 	struct spdk_bs_md_mask *mask;
2592 	struct spdk_blob_opts blob_opts;
2593 	struct spdk_blob *blob, *snapshot;
2594 	spdk_blob_id blobid, snapshotid;
2595 	uint64_t total_data_clusters;
2596 
2597 	dev = init_dev();
2598 	spdk_bs_opts_init(&opts, sizeof(opts));
2599 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2600 	/*
2601 	 * The bdev_size is 64M and cluster_sz is 1M, so there are 64 clusters. The
2602 	 * blobstore creates 64 md pages by default. We set num_md_pages to 128 so
2603 	 * that the blobstore can later grow to double its size.
2604 	 */
2605 	opts.num_md_pages = 128;
2606 
2607 	/* Initialize a new blob store */
2608 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2609 	poll_threads();
2610 	CU_ASSERT(g_bserrno == 0);
2611 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2612 	bs = g_bs;
2613 
2614 	/* Create blob */
2615 	ut_spdk_blob_opts_init(&blob_opts);
2616 	blob_opts.num_clusters = 10;
2617 
2618 	blob = ut_blob_create_and_open(bs, &blob_opts);
2619 	blobid = spdk_blob_get_id(blob);
2620 
2621 	/* Create snapshot */
2622 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2623 	poll_threads();
2624 	CU_ASSERT(g_bserrno == 0);
2625 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2626 	snapshotid = g_blobid;
2627 
2628 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2629 	poll_threads();
2630 	CU_ASSERT(g_bserrno == 0);
2631 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2632 	snapshot = g_blob;
2633 
2634 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2635 	poll_threads();
2636 	CU_ASSERT(g_bserrno == 0);
2637 
2638 	spdk_blob_close(blob, blob_op_complete, NULL);
2639 	poll_threads();
2640 	CU_ASSERT(g_bserrno == 0);
2641 
2642 	total_data_clusters = bs->total_data_clusters;
2643 	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);
2644 
2645 	/* Unload the blob store */
2646 	spdk_bs_unload(bs, bs_op_complete, NULL);
2647 	poll_threads();
2648 	CU_ASSERT(g_bserrno == 0);
2649 	g_bs = NULL;
2650 	g_blob = NULL;
2651 	g_blobid = 0;
2652 
2653 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2654 	CU_ASSERT(super_block->clean == 1);
2655 
2656 	mask = (struct spdk_bs_md_mask *)(g_dev_buffer + super_block->used_cluster_mask_start * 4096);
2657 	CU_ASSERT(mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2658 	CU_ASSERT(mask->length == super_block->size / super_block->cluster_size);
2659 
2660 	/*
2661 	 * Change mask->length to emulate the following scenario: a spdk_bs_grow failed after it
2662 	 * changed the used_cluster bitmap length but before it updated the super block.
2663 	 */
2664 	mask->length *= 2;
2665 
2666 	/* Load an existing blob store */
2667 	dev = init_dev();
2668 	dev->blockcnt *= 2;
2669 	spdk_bs_opts_init(&opts, sizeof(opts));
2670 	opts.clear_method = BS_CLEAR_WITH_NONE;
2671 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2672 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2673 	poll_threads();
2674 	CU_ASSERT(g_bserrno == 0);
2675 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2676 	bs = g_bs;
2677 
2678 	/* Check the capacity is the same as before */
2679 	CU_ASSERT(bs->total_data_clusters == total_data_clusters);
2680 	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);
2681 
2682 	/* Check the blob and the snapshot are still available */
2683 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2684 	poll_threads();
2685 	CU_ASSERT(g_bserrno == 0);
2686 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2687 	blob = g_blob;
2688 
2689 	spdk_blob_close(blob, blob_op_complete, NULL);
2690 	poll_threads();
2691 	CU_ASSERT(g_bserrno == 0);
2692 
2693 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2694 	poll_threads();
2695 	CU_ASSERT(g_bserrno == 0);
2696 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2697 	snapshot = g_blob;
2698 
2699 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2700 	poll_threads();
2701 	CU_ASSERT(g_bserrno == 0);
2702 
2703 	spdk_bs_unload(bs, bs_op_complete, NULL);
2704 	poll_threads();
2705 	CU_ASSERT(g_bserrno == 0);
2706 	CU_ASSERT(super_block->clean == 1);
2707 	g_bs = NULL;
2708 }
2709 
2710 static void
2711 bs_type(void)
2712 {
2713 	struct spdk_blob_store *bs;
2714 	struct spdk_bs_dev *dev;
2715 	struct spdk_bs_opts opts;
2716 
2717 	dev = init_dev();
2718 	spdk_bs_opts_init(&opts, sizeof(opts));
2719 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2720 
2721 	/* Initialize a new blob store */
2722 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2723 	poll_threads();
2724 	CU_ASSERT(g_bserrno == 0);
2725 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2726 	bs = g_bs;
2727 
2728 	/* Unload the blob store */
2729 	spdk_bs_unload(bs, bs_op_complete, NULL);
2730 	poll_threads();
2731 	CU_ASSERT(g_bserrno == 0);
2732 	g_bs = NULL;
2733 	g_blob = NULL;
2734 	g_blobid = 0;
2735 
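	/* The bstype recorded at init must match on load; an empty bstype in the load opts acts as a wildcard */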
2736 	/* Load with a non-existing blobstore type */
2737 	dev = init_dev();
2738 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2739 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2740 	poll_threads();
2741 	CU_ASSERT(g_bserrno != 0);
2742 
2743 	/* Load with empty blobstore type */
2744 	dev = init_dev();
2745 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2746 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2747 	poll_threads();
2748 	CU_ASSERT(g_bserrno == 0);
2749 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2750 	bs = g_bs;
2751 
2752 	spdk_bs_unload(bs, bs_op_complete, NULL);
2753 	poll_threads();
2754 	CU_ASSERT(g_bserrno == 0);
2755 	g_bs = NULL;
2756 
2757 	/* Initialize a new blob store with empty bstype */
2758 	dev = init_dev();
2759 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2760 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2761 	poll_threads();
2762 	CU_ASSERT(g_bserrno == 0);
2763 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2764 	bs = g_bs;
2765 
2766 	spdk_bs_unload(bs, bs_op_complete, NULL);
2767 	poll_threads();
2768 	CU_ASSERT(g_bserrno == 0);
2769 	g_bs = NULL;
2770 
2771 	/* Load with a non-existing blobstore type */
2772 	dev = init_dev();
2773 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2774 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2775 	poll_threads();
2776 	CU_ASSERT(g_bserrno != 0);
2777 
2778 	/* Load with empty blobstore type */
2779 	dev = init_dev();
2780 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2781 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2782 	poll_threads();
2783 	CU_ASSERT(g_bserrno == 0);
2784 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2785 	bs = g_bs;
2786 
2787 	spdk_bs_unload(bs, bs_op_complete, NULL);
2788 	poll_threads();
2789 	CU_ASSERT(g_bserrno == 0);
2790 	g_bs = NULL;
2791 }
2792 
2793 static void
2794 bs_super_block(void)
2795 {
2796 	struct spdk_blob_store *bs;
2797 	struct spdk_bs_dev *dev;
2798 	struct spdk_bs_super_block *super_block;
2799 	struct spdk_bs_opts opts;
2800 	struct spdk_bs_super_block_ver1 super_block_v1;
2801 
2802 	dev = init_dev();
2803 	spdk_bs_opts_init(&opts, sizeof(opts));
2804 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2805 
2806 	/* Initialize a new blob store */
2807 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2808 	poll_threads();
2809 	CU_ASSERT(g_bserrno == 0);
2810 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2811 	bs = g_bs;
2812 
2813 	/* Unload the blob store */
2814 	spdk_bs_unload(bs, bs_op_complete, NULL);
2815 	poll_threads();
2816 	CU_ASSERT(g_bserrno == 0);
2817 	g_bs = NULL;
2818 	g_blob = NULL;
2819 	g_blobid = 0;
2820 
2821 	/* Load an existing blob store with version newer than supported */
2822 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2823 	super_block->version++;
2824 
2825 	dev = init_dev();
2826 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2827 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2828 	poll_threads();
2829 	CU_ASSERT(g_bserrno != 0);
2830 
2831 	/* Create a new blob store with super block version 1 */
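	/* Version 1 super blocks predate the used_blobid mask, bstype, size and io_unit_size
	 * fields; load must still accept them. */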
2832 	dev = init_dev();
2833 	super_block_v1.version = 1;
2834 	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
2835 	super_block_v1.length = 0x1000;
2836 	super_block_v1.clean = 1;
2837 	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
2838 	super_block_v1.cluster_size = 0x100000;
2839 	super_block_v1.used_page_mask_start = 0x01;
2840 	super_block_v1.used_page_mask_len = 0x01;
2841 	super_block_v1.used_cluster_mask_start = 0x02;
2842 	super_block_v1.used_cluster_mask_len = 0x01;
2843 	super_block_v1.md_start = 0x03;
2844 	super_block_v1.md_len = 0x40;
2845 	memset(super_block_v1.reserved, 0, 4036);
2846 	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
2847 	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
2848 
2849 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2850 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2851 	poll_threads();
2852 	CU_ASSERT(g_bserrno == 0);
2853 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2854 	bs = g_bs;
2855 
2856 	spdk_bs_unload(bs, bs_op_complete, NULL);
2857 	poll_threads();
2858 	CU_ASSERT(g_bserrno == 0);
2859 	g_bs = NULL;
2860 }
2861 
2862 static void
2863 bs_test_recover_cluster_count(void)
2864 {
2865 	struct spdk_blob_store *bs;
2866 	struct spdk_bs_dev *dev;
2867 	struct spdk_bs_super_block super_block;
2868 	struct spdk_bs_opts opts;
2869 
2870 	dev = init_dev();
2871 	spdk_bs_opts_init(&opts, sizeof(opts));
2872 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2873 
2874 	super_block.version = 3;
2875 	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
2876 	super_block.length = 0x1000;
2877 	super_block.clean = 0;
2878 	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
2879 	super_block.cluster_size = 4096;
2880 	super_block.used_page_mask_start = 0x01;
2881 	super_block.used_page_mask_len = 0x01;
2882 	super_block.used_cluster_mask_start = 0x02;
2883 	super_block.used_cluster_mask_len = 0x01;
2884 	super_block.used_blobid_mask_start = 0x03;
2885 	super_block.used_blobid_mask_len = 0x01;
2886 	super_block.md_start = 0x04;
2887 	super_block.md_len = 0x40;
2888 	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
2889 	super_block.size = dev->blockcnt * dev->blocklen;
2890 	super_block.io_unit_size = 0x1000;
2891 	memset(super_block.reserved, 0, 4000);
2892 	super_block.crc = blob_md_page_calc_crc(&super_block);
2893 	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));
2894 
2895 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2896 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2897 	poll_threads();
2898 	CU_ASSERT(g_bserrno == 0);
2899 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2900 	bs = g_bs;
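	/* cluster_size equals the 4 KiB md page size here, so every metadata page occupies one
	 * cluster; after recovery only the first md_start + md_len clusters (super block, masks
	 * and md region) are marked used and the rest are free. */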
2901 	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
2902 			super_block.md_len));
2903 
2904 	spdk_bs_unload(bs, bs_op_complete, NULL);
2905 	poll_threads();
2906 	CU_ASSERT(g_bserrno == 0);
2907 	g_bs = NULL;
2908 }
2909 
2910 static void
2911 bs_test_grow(void)
2912 {
2913 	struct spdk_blob_store *bs;
2914 	struct spdk_bs_dev *dev;
2915 	struct spdk_bs_super_block super_block;
2916 	struct spdk_bs_opts opts;
2917 	struct spdk_bs_md_mask mask;
2918 	uint64_t bdev_size;
2919 
2920 	dev = init_dev();
2921 	bdev_size = dev->blockcnt * dev->blocklen;
2922 	spdk_bs_opts_init(&opts, sizeof(opts));
2923 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2924 	poll_threads();
2925 	CU_ASSERT(g_bserrno == 0);
2926 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2927 	bs = g_bs;
2928 
2929 	spdk_bs_unload(bs, bs_op_complete, NULL);
2930 	poll_threads();
2931 	CU_ASSERT(g_bserrno == 0);
2932 	g_bs = NULL;
2933 
2934 	/*
2935 	 * To make sure all the metadata has been written to the disk,
2936 	 * we check g_dev_buffer after spdk_bs_unload.
2937 	 */
2938 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
2939 	CU_ASSERT(super_block.size == bdev_size);
2940 
2941 	/*
2942 	 * Make sure the used_cluster mask is correct.
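	 * With the default 1 MiB cluster size its length equals the device size in MiB.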
2943 	 */
2944 	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
2945 	       sizeof(struct spdk_bs_md_mask));
2946 	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2947 	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));
2948 
2949 	/*
2950 	 * The default dev size is 64M; here we set the dev size to 128M, and the
2951 	 * blobstore will adjust the metadata according to the new size.
2952 	 * The dev size is larger than the g_dev_buffer size, so we set clear_method
2953 	 * to NONE, otherwise the blobstore would try to clear the dev and write beyond
2954 	 * the end of g_dev_buffer.
2955 	 */
2956 	dev = init_dev();
2957 	dev->blockcnt = (128L * 1024L * 1024L) / dev->blocklen;
2958 	bdev_size = dev->blockcnt * dev->blocklen;
2959 	spdk_bs_opts_init(&opts, sizeof(opts));
2960 	opts.clear_method = BS_CLEAR_WITH_NONE;
2961 	spdk_bs_grow(dev, &opts, bs_op_with_handle_complete, NULL);
2962 	poll_threads();
2963 	CU_ASSERT(g_bserrno == 0);
2964 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2965 	bs = g_bs;
2966 
2967 	/*
2968 	 * After spdk_bs_grow, all metadata has been written to the disk,
2969 	 * so we can check g_dev_buffer now.
2970 	 */
2971 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
2972 	CU_ASSERT(super_block.size == bdev_size);
2973 
2974 	/*
2975 	 * Make sure the used_cluster mask has been updated according to the bdev size
2976 	 */
2977 	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
2978 	       sizeof(struct spdk_bs_md_mask));
2979 	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2980 	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));
2981 
2982 	spdk_bs_unload(bs, bs_op_complete, NULL);
2983 	poll_threads();
2984 	CU_ASSERT(g_bserrno == 0);
2985 	g_bs = NULL;
2986 }
2987 
2988 /*
2989  * Unloading a blobstore with an open blob should fail; after closing the blob, unload succeeds.
2990  */
2991 static void
2992 bs_unload(void)
2993 {
2994 	struct spdk_blob_store *bs = g_bs;
2995 	struct spdk_blob *blob;
2996 
2997 	/* Create a blob and open it. */
2998 	blob = ut_blob_create_and_open(bs, NULL);
2999 
3000 	/* Try to unload blobstore, should fail with open blob */
3001 	g_bserrno = -1;
3002 	spdk_bs_unload(bs, bs_op_complete, NULL);
3003 	poll_threads();
3004 	CU_ASSERT(g_bserrno == -EBUSY);
3005 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3006 
3007 	/* Close the blob, then successfully unload blobstore */
3008 	g_bserrno = -1;
3009 	spdk_blob_close(blob, blob_op_complete, NULL);
3010 	poll_threads();
3011 	CU_ASSERT(g_bserrno == 0);
3012 }
3013 
3014 /*
3015  * Create a blobstore with a cluster size different than the default, and ensure it is
3016  *  persisted.
3017  */
3018 static void
3019 bs_cluster_sz(void)
3020 {
3021 	struct spdk_blob_store *bs;
3022 	struct spdk_bs_dev *dev;
3023 	struct spdk_bs_opts opts;
3024 	uint32_t cluster_sz;
3025 
3026 	/* Set cluster size to zero */
3027 	dev = init_dev();
3028 	spdk_bs_opts_init(&opts, sizeof(opts));
3029 	opts.cluster_sz = 0;
3030 
3031 	/* Initialize a new blob store */
3032 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3033 	poll_threads();
3034 	CU_ASSERT(g_bserrno == -EINVAL);
3035 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3036 
3037 	/*
3038 	 * Set cluster size to the blobstore page size;
3039 	 * to work, it must be at least twice the blobstore page size.
3040 	 */
3041 	dev = init_dev();
3042 	spdk_bs_opts_init(&opts, sizeof(opts));
3043 	opts.cluster_sz = SPDK_BS_PAGE_SIZE;
3044 
3045 	/* Initialize a new blob store */
3046 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3047 	poll_threads();
3048 	CU_ASSERT(g_bserrno == -ENOMEM);
3049 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3050 
3051 	/*
3052 	 * Set cluster size to less than the page size;
3053 	 * again, it must be at least twice the blobstore page size.
3054 	 */
3055 	dev = init_dev();
3056 	spdk_bs_opts_init(&opts, sizeof(opts));
3057 	opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
3058 
3059 	/* Initialize a new blob store */
3060 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3061 	poll_threads();
3062 	CU_ASSERT(g_bserrno == -EINVAL);
3063 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3064 
3065 	/* Set cluster size to twice the default */
3066 	dev = init_dev();
3067 	spdk_bs_opts_init(&opts, sizeof(opts));
3068 	opts.cluster_sz *= 2;
3069 	cluster_sz = opts.cluster_sz;
3070 
3071 	/* Initialize a new blob store */
3072 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3073 	poll_threads();
3074 	CU_ASSERT(g_bserrno == 0);
3075 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3076 	bs = g_bs;
3077 
3078 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3079 
3080 	ut_bs_reload(&bs, &opts);
3081 
3082 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3083 
3084 	spdk_bs_unload(bs, bs_op_complete, NULL);
3085 	poll_threads();
3086 	CU_ASSERT(g_bserrno == 0);
3087 	g_bs = NULL;
3088 }
3089 
3090 /*
3091  * Create a blobstore, reload it and ensure total usable cluster count
3092  *  stays the same.
3093  */
3094 static void
3095 bs_usable_clusters(void)
3096 {
3097 	struct spdk_blob_store *bs = g_bs;
3098 	struct spdk_blob *blob;
3099 	uint32_t clusters;
3100 	int i;
3101 
3102 
3103 	clusters = spdk_bs_total_data_cluster_count(bs);
3104 
3105 	ut_bs_reload(&bs, NULL);
3106 
3107 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3108 
3109 	/* Create and resize blobs to make sure that the usable cluster count won't change */
3110 	for (i = 0; i < 4; i++) {
3111 		g_bserrno = -1;
3112 		g_blobid = SPDK_BLOBID_INVALID;
3113 		blob = ut_blob_create_and_open(bs, NULL);
3114 
3115 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3116 		poll_threads();
3117 		CU_ASSERT(g_bserrno == 0);
3118 
3119 		g_bserrno = -1;
3120 		spdk_blob_close(blob, blob_op_complete, NULL);
3121 		poll_threads();
3122 		CU_ASSERT(g_bserrno == 0);
3123 
3124 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3125 	}
3126 
3127 	/* Reload the blob store to make sure that nothing changed */
3128 	ut_bs_reload(&bs, NULL);
3129 
3130 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3131 }
3132 
3133 /*
3134  * Test resizing of the metadata blob.  This requires creating enough blobs
3135  *  so that one cluster is not enough to fit the metadata for those blobs.
3136  *  To induce this condition to happen more quickly, we reduce the cluster
3137  *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
3138  */
3139 static void
3140 bs_resize_md(void)
3141 {
3142 	struct spdk_blob_store *bs;
3143 	const int CLUSTER_PAGE_COUNT = 4;
3144 	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
3145 	struct spdk_bs_dev *dev;
3146 	struct spdk_bs_opts opts;
3147 	struct spdk_blob *blob;
3148 	struct spdk_blob_opts blob_opts;
3149 	uint32_t cluster_sz;
3150 	spdk_blob_id blobids[NUM_BLOBS];
3151 	int i;
3152 
3153 
3154 	dev = init_dev();
3155 	spdk_bs_opts_init(&opts, sizeof(opts));
3156 	opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
3157 	cluster_sz = opts.cluster_sz;
3158 
3159 	/* Initialize a new blob store */
3160 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3161 	poll_threads();
3162 	CU_ASSERT(g_bserrno == 0);
3163 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3164 	bs = g_bs;
3165 
3166 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3167 
3168 	ut_spdk_blob_opts_init(&blob_opts);
3169 
3170 	for (i = 0; i < NUM_BLOBS; i++) {
3171 		g_bserrno = -1;
3172 		g_blobid = SPDK_BLOBID_INVALID;
3173 		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3174 		poll_threads();
3175 		CU_ASSERT(g_bserrno == 0);
3176 		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
3177 		blobids[i] = g_blobid;
3178 	}
3179 
3180 	ut_bs_reload(&bs, &opts);
3181 
3182 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3183 
3184 	for (i = 0; i < NUM_BLOBS; i++) {
3185 		g_bserrno = -1;
3186 		g_blob = NULL;
3187 		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
3188 		poll_threads();
3189 		CU_ASSERT(g_bserrno == 0);
3190 		CU_ASSERT(g_blob !=  NULL);
3191 		blob = g_blob;
3192 		g_bserrno = -1;
3193 		spdk_blob_close(blob, blob_op_complete, NULL);
3194 		poll_threads();
3195 		CU_ASSERT(g_bserrno == 0);
3196 	}
3197 
3198 	spdk_bs_unload(bs, bs_op_complete, NULL);
3199 	poll_threads();
3200 	CU_ASSERT(g_bserrno == 0);
3201 	g_bs = NULL;
3202 }
3203 
3204 static void
3205 bs_destroy(void)
3206 {
3207 	struct spdk_blob_store *bs;
3208 	struct spdk_bs_dev *dev;
3209 
3210 	/* Initialize a new blob store */
3211 	dev = init_dev();
3212 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3213 	poll_threads();
3214 	CU_ASSERT(g_bserrno == 0);
3215 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3216 	bs = g_bs;
3217 
3218 	/* Destroy the blob store */
3219 	g_bserrno = -1;
3220 	spdk_bs_destroy(bs, bs_op_complete, NULL);
3221 	poll_threads();
3222 	CU_ASSERT(g_bserrno == 0);
3223 
3224 	/* Loading a non-existent blob store should fail. */
3225 	g_bs = NULL;
3226 	dev = init_dev();
3227 
3228 	g_bserrno = 0;
3229 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3230 	poll_threads();
3231 	CU_ASSERT(g_bserrno != 0);
3232 }
3233 
3234 /* Try to hit all of the corner cases associated with serializing
3235  * a blob to disk
3236  */
3237 static void
3238 blob_serialize_test(void)
3239 {
3240 	struct spdk_bs_dev *dev;
3241 	struct spdk_bs_opts opts;
3242 	struct spdk_blob_store *bs;
3243 	spdk_blob_id blobid[2];
3244 	struct spdk_blob *blob[2];
3245 	uint64_t i;
3246 	char *value;
3247 	int rc;
3248 
3249 	dev = init_dev();
3250 
3251 	/* Initialize a new blobstore with very small clusters */
3252 	spdk_bs_opts_init(&opts, sizeof(opts));
3253 	opts.cluster_sz = dev->blocklen * 8;
3254 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3255 	poll_threads();
3256 	CU_ASSERT(g_bserrno == 0);
3257 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3258 	bs = g_bs;
3259 
3260 	/* Create and open two blobs */
3261 	for (i = 0; i < 2; i++) {
3262 		blob[i] = ut_blob_create_and_open(bs, NULL);
3263 		blobid[i] = spdk_blob_get_id(blob[i]);
3264 
3265 		/* Set a fairly large xattr on both blobs to eat up
3266 		 * metadata space
3267 		 */
3268 		value = calloc(dev->blocklen - 64, sizeof(char));
3269 		SPDK_CU_ASSERT_FATAL(value != NULL);
3270 		memset(value, i, dev->blocklen / 2);
3271 		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
3272 		CU_ASSERT(rc == 0);
3273 		free(value);
3274 	}
3275 
3276 	/* Resize the blobs, alternating 1 cluster at a time.
3277 	 * This thwarts run length encoding and will cause spill
3278 	 * over of the extents.
3279 	 */
3280 	for (i = 0; i < 6; i++) {
3281 		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
3282 		poll_threads();
3283 		CU_ASSERT(g_bserrno == 0);
3284 	}
3285 
3286 	for (i = 0; i < 2; i++) {
3287 		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
3288 		poll_threads();
3289 		CU_ASSERT(g_bserrno == 0);
3290 	}
3291 
3292 	/* Close the blobs */
3293 	for (i = 0; i < 2; i++) {
3294 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3295 		poll_threads();
3296 		CU_ASSERT(g_bserrno == 0);
3297 	}
3298 
3299 	ut_bs_reload(&bs, &opts);
3300 
3301 	for (i = 0; i < 2; i++) {
3302 		blob[i] = NULL;
3303 
3304 		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
3305 		poll_threads();
3306 		CU_ASSERT(g_bserrno == 0);
3307 		CU_ASSERT(g_blob != NULL);
3308 		blob[i] = g_blob;
3309 
3310 		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
3311 
3312 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3313 		poll_threads();
3314 		CU_ASSERT(g_bserrno == 0);
3315 	}
3316 
3317 	spdk_bs_unload(bs, bs_op_complete, NULL);
3318 	poll_threads();
3319 	CU_ASSERT(g_bserrno == 0);
3320 	g_bs = NULL;
3321 }
3322 
3323 static void
3324 blob_crc(void)
3325 {
3326 	struct spdk_blob_store *bs = g_bs;
3327 	struct spdk_blob *blob;
3328 	spdk_blob_id blobid;
3329 	uint32_t page_num;
3330 	int index;
3331 	struct spdk_blob_md_page *page;
3332 
3333 	blob = ut_blob_create_and_open(bs, NULL);
3334 	blobid = spdk_blob_get_id(blob);
3335 
3336 	spdk_blob_close(blob, blob_op_complete, NULL);
3337 	poll_threads();
3338 	CU_ASSERT(g_bserrno == 0);
3339 
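	/* Corrupt the blob's md page on the device: the low 32 bits of the blob ID are its page
	 * index within the md region, so zeroing that page's CRC must make open and delete fail. */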
3340 	page_num = bs_blobid_to_page(blobid);
3341 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3342 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3343 	page->crc = 0;
3344 
3345 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3346 	poll_threads();
3347 	CU_ASSERT(g_bserrno == -EINVAL);
3348 	CU_ASSERT(g_blob == NULL);
3349 	g_bserrno = 0;
3350 
3351 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
3352 	poll_threads();
3353 	CU_ASSERT(g_bserrno == -EINVAL);
3354 }
3355 
3356 static void
3357 super_block_crc(void)
3358 {
3359 	struct spdk_blob_store *bs;
3360 	struct spdk_bs_dev *dev;
3361 	struct spdk_bs_super_block *super_block;
3362 
3363 	dev = init_dev();
3364 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3365 	poll_threads();
3366 	CU_ASSERT(g_bserrno == 0);
3367 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3368 	bs = g_bs;
3369 
3370 	spdk_bs_unload(bs, bs_op_complete, NULL);
3371 	poll_threads();
3372 	CU_ASSERT(g_bserrno == 0);
3373 	g_bs = NULL;
3374 
3375 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
3376 	super_block->crc = 0;
3377 	dev = init_dev();
3378 
3379 	/* Load an existing blob store */
3380 	g_bserrno = 0;
3381 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3382 	poll_threads();
3383 	CU_ASSERT(g_bserrno == -EILSEQ);
3384 }
3385 
3386 /* For the blob dirty shutdown test we run the following sub-cases:
3387  * 1 Initialize a new blob store and create 1 super blob with some xattrs, then
3388  *   dirty shutdown, reload the blob store and verify the xattrs.
3389  * 2 Resize the blob from 10 clusters to 20 clusters, then dirty shutdown,
3390  *   reload the blob store and verify the cluster count.
3391  * 3 Create a second blob, then dirty shutdown, reload the blob store
3392  *   and verify the second blob.
3393  * 4 Delete the second blob, then dirty shutdown, reload the blob store
3394  *   and verify the second blob is invalid.
3395  * 5 Create the second blob again and also create a third blob, modify the
3396  *   md of the second blob to make it invalid, then dirty shutdown and
3397  *   reload the blob store; the second blob should be invalid and the
3398  *   third blob should be correct.
3399  */
3400 static void
3401 blob_dirty_shutdown(void)
3402 {
3403 	int rc;
3404 	int index;
3405 	struct spdk_blob_store *bs = g_bs;
3406 	spdk_blob_id blobid1, blobid2, blobid3;
3407 	struct spdk_blob *blob = g_blob;
3408 	uint64_t length;
3409 	uint64_t free_clusters;
3410 	const void *value;
3411 	size_t value_len;
3412 	uint32_t page_num;
3413 	struct spdk_blob_md_page *page;
3414 	struct spdk_blob_opts blob_opts;
3415 
3416 	/* Create first blob */
3417 	blobid1 = spdk_blob_get_id(blob);
3418 
3419 	/* Set some xattrs */
3420 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
3421 	CU_ASSERT(rc == 0);
3422 
3423 	length = 2345;
3424 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3425 	CU_ASSERT(rc == 0);
3426 
3427 	/* Put an xattr that exactly fills a single metadata page.
3428 	 * This results in additional pages being added to the MD:
3429 	 * the first holds the flags and the smaller xattrs, the second the
3430 	 * large xattr, and the third just the extents.
3431 	 */
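	/* Note: 4072 below is expected to match SPDK_BS_MAX_DESC_SIZE, the size of the
	 * descriptors area within a single metadata page (compare the analogous
	 * calculation in blob_set_xattrs_test()). */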
3432 	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
3433 			      strlen("large_xattr");
3434 	char *xattr = calloc(xattr_length, sizeof(char));
3435 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3436 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3437 	free(xattr);
3438 	SPDK_CU_ASSERT_FATAL(rc == 0);
3439 
3440 	/* Resize the blob */
3441 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3442 	poll_threads();
3443 	CU_ASSERT(g_bserrno == 0);
3444 
3445 	/* Set the blob as the super blob */
3446 	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
3447 	poll_threads();
3448 	CU_ASSERT(g_bserrno == 0);
3449 
3450 	free_clusters = spdk_bs_free_cluster_count(bs);
3451 
3452 	spdk_blob_close(blob, blob_op_complete, NULL);
3453 	poll_threads();
3454 	CU_ASSERT(g_bserrno == 0);
3455 	blob = NULL;
3456 	g_blob = NULL;
3457 	g_blobid = SPDK_BLOBID_INVALID;
3458 
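	/* ut_bs_dirty_load() simulates a dirty shutdown: the in-memory blobstore is
	 * freed without a clean unload and then reloaded from the device. */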
3459 	ut_bs_dirty_load(&bs, NULL);
3460 
3461 	/* Get the super blob */
3462 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
3463 	poll_threads();
3464 	CU_ASSERT(g_bserrno == 0);
3465 	CU_ASSERT(blobid1 == g_blobid);
3466 
3467 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3468 	poll_threads();
3469 	CU_ASSERT(g_bserrno == 0);
3470 	CU_ASSERT(g_blob != NULL);
3471 	blob = g_blob;
3472 
3473 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3474 
3475 	/* Get the xattrs */
3476 	value = NULL;
3477 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3478 	CU_ASSERT(rc == 0);
3479 	SPDK_CU_ASSERT_FATAL(value != NULL);
3480 	CU_ASSERT(*(uint64_t *)value == length);
3481 	CU_ASSERT(value_len == 8);
3482 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3483 
3484 	/* Resize the blob */
3485 	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
3486 	poll_threads();
3487 	CU_ASSERT(g_bserrno == 0);
3488 
3489 	free_clusters = spdk_bs_free_cluster_count(bs);
3490 
3491 	spdk_blob_close(blob, blob_op_complete, NULL);
3492 	poll_threads();
3493 	CU_ASSERT(g_bserrno == 0);
3494 	blob = NULL;
3495 	g_blob = NULL;
3496 	g_blobid = SPDK_BLOBID_INVALID;
3497 
3498 	ut_bs_dirty_load(&bs, NULL);
3499 
3500 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3501 	poll_threads();
3502 	CU_ASSERT(g_bserrno == 0);
3503 	CU_ASSERT(g_blob != NULL);
3504 	blob = g_blob;
3505 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
3506 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3507 
3508 	spdk_blob_close(blob, blob_op_complete, NULL);
3509 	poll_threads();
3510 	CU_ASSERT(g_bserrno == 0);
3511 	blob = NULL;
3512 	g_blob = NULL;
3513 	g_blobid = SPDK_BLOBID_INVALID;
3514 
3515 	/* Create second blob */
3516 	blob = ut_blob_create_and_open(bs, NULL);
3517 	blobid2 = spdk_blob_get_id(blob);
3518 
3519 	/* Set some xattrs */
3520 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3521 	CU_ASSERT(rc == 0);
3522 
3523 	length = 5432;
3524 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3525 	CU_ASSERT(rc == 0);
3526 
3527 	/* Resize the blob */
3528 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3529 	poll_threads();
3530 	CU_ASSERT(g_bserrno == 0);
3531 
3532 	free_clusters = spdk_bs_free_cluster_count(bs);
3533 
3534 	spdk_blob_close(blob, blob_op_complete, NULL);
3535 	poll_threads();
3536 	CU_ASSERT(g_bserrno == 0);
3537 	blob = NULL;
3538 	g_blob = NULL;
3539 	g_blobid = SPDK_BLOBID_INVALID;
3540 
3541 	ut_bs_dirty_load(&bs, NULL);
3542 
3543 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3544 	poll_threads();
3545 	CU_ASSERT(g_bserrno == 0);
3546 	CU_ASSERT(g_blob != NULL);
3547 	blob = g_blob;
3548 
3549 	/* Get the xattrs */
3550 	value = NULL;
3551 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3552 	CU_ASSERT(rc == 0);
3553 	SPDK_CU_ASSERT_FATAL(value != NULL);
3554 	CU_ASSERT(*(uint64_t *)value == length);
3555 	CU_ASSERT(value_len == 8);
3556 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3557 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3558 
3559 	ut_blob_close_and_delete(bs, blob);
3560 
3561 	free_clusters = spdk_bs_free_cluster_count(bs);
3562 
3563 	ut_bs_dirty_load(&bs, NULL);
3564 
3565 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3566 	poll_threads();
3567 	CU_ASSERT(g_bserrno != 0);
3568 	CU_ASSERT(g_blob == NULL);
3569 
3570 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3571 	poll_threads();
3572 	CU_ASSERT(g_bserrno == 0);
3573 	CU_ASSERT(g_blob != NULL);
3574 	blob = g_blob;
3575 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3576 	spdk_blob_close(blob, blob_op_complete, NULL);
3577 	poll_threads();
3578 	CU_ASSERT(g_bserrno == 0);
3579 
3580 	ut_bs_reload(&bs, NULL);
3581 
3582 	/* Create second blob */
3583 	ut_spdk_blob_opts_init(&blob_opts);
3584 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3585 	poll_threads();
3586 	CU_ASSERT(g_bserrno == 0);
3587 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3588 	blobid2 = g_blobid;
3589 
3590 	/* Create third blob */
3591 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3592 	poll_threads();
3593 	CU_ASSERT(g_bserrno == 0);
3594 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3595 	blobid3 = g_blobid;
3596 
3597 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3598 	poll_threads();
3599 	CU_ASSERT(g_bserrno == 0);
3600 	CU_ASSERT(g_blob != NULL);
3601 	blob = g_blob;
3602 
3603 	/* Set some xattrs for second blob */
3604 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3605 	CU_ASSERT(rc == 0);
3606 
3607 	length = 5432;
3608 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3609 	CU_ASSERT(rc == 0);
3610 
3611 	spdk_blob_close(blob, blob_op_complete, NULL);
3612 	poll_threads();
3613 	CU_ASSERT(g_bserrno == 0);
3614 	blob = NULL;
3615 	g_blob = NULL;
3616 	g_blobid = SPDK_BLOBID_INVALID;
3617 
3618 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3619 	poll_threads();
3620 	CU_ASSERT(g_bserrno == 0);
3621 	CU_ASSERT(g_blob != NULL);
3622 	blob = g_blob;
3623 
3624 	/* Set some xattrs for third blob */
3625 	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
3626 	CU_ASSERT(rc == 0);
3627 
3628 	length = 5432;
3629 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3630 	CU_ASSERT(rc == 0);
3631 
3632 	spdk_blob_close(blob, blob_op_complete, NULL);
3633 	poll_threads();
3634 	CU_ASSERT(g_bserrno == 0);
3635 	blob = NULL;
3636 	g_blob = NULL;
3637 	g_blobid = SPDK_BLOBID_INVALID;
3638 
3639 	/* Mark second blob as invalid */
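	/* Bumping sequence_num makes the page look like a continuation page rather than
	 * the first page of the blob's metadata chain; the CRC is recalculated so that
	 * the page is rejected for its content, not for a CRC mismatch. */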
3640 	page_num = bs_blobid_to_page(blobid2);
3641 
3642 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3643 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3644 	page->sequence_num = 1;
3645 	page->crc = blob_md_page_calc_crc(page);
3646 
3647 	free_clusters = spdk_bs_free_cluster_count(bs);
3648 
3649 	ut_bs_dirty_load(&bs, NULL);
3650 
3651 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3652 	poll_threads();
3653 	CU_ASSERT(g_bserrno != 0);
3654 	CU_ASSERT(g_blob == NULL);
3655 
3656 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3657 	poll_threads();
3658 	CU_ASSERT(g_bserrno == 0);
3659 	CU_ASSERT(g_blob != NULL);
3660 	blob = g_blob;
3661 
3662 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3663 }
3664 
3665 static void
3666 blob_flags(void)
3667 {
3668 	struct spdk_blob_store *bs = g_bs;
3669 	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
3670 	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
3671 	struct spdk_blob_opts blob_opts;
3672 	int rc;
3673 
3674 	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
3675 	blob_invalid = ut_blob_create_and_open(bs, NULL);
3676 	blobid_invalid = spdk_blob_get_id(blob_invalid);
3677 
3678 	blob_data_ro = ut_blob_create_and_open(bs, NULL);
3679 	blobid_data_ro = spdk_blob_get_id(blob_data_ro);
3680 
3681 	ut_spdk_blob_opts_init(&blob_opts);
3682 	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
3683 	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
3684 	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
3685 	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);
3686 
3687 	/* Change the size of blob_data_ro to check that flags are serialized
3688 	 * when the blob has a non-zero number of extents */
3689 	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
3690 	poll_threads();
3691 	CU_ASSERT(g_bserrno == 0);
3692 
3693 	/* Set an xattr to check that flags are serialized
3694 	 * when the blob has a non-zero number of xattrs */
3695 	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
3696 	CU_ASSERT(rc == 0);
3697 
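	/* Set an unknown (unsupported) bit in each flag category. As verified after the
	 * reload below, unknown invalid_flags prevent the blob from being opened, unknown
	 * data_ro_flags force both data and md read-only, and unknown md_ro_flags force
	 * only the md read-only. */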
3698 	blob_invalid->invalid_flags = (1ULL << 63);
3699 	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
3700 	blob_data_ro->data_ro_flags = (1ULL << 62);
3701 	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
3702 	blob_md_ro->md_ro_flags = (1ULL << 61);
3703 	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
3704 
3705 	g_bserrno = -1;
3706 	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
3707 	poll_threads();
3708 	CU_ASSERT(g_bserrno == 0);
3709 	g_bserrno = -1;
3710 	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
3711 	poll_threads();
3712 	CU_ASSERT(g_bserrno == 0);
3713 	g_bserrno = -1;
3714 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3715 	poll_threads();
3716 	CU_ASSERT(g_bserrno == 0);
3717 
3718 	g_bserrno = -1;
3719 	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
3720 	poll_threads();
3721 	CU_ASSERT(g_bserrno == 0);
3722 	blob_invalid = NULL;
3723 	g_bserrno = -1;
3724 	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
3725 	poll_threads();
3726 	CU_ASSERT(g_bserrno == 0);
3727 	blob_data_ro = NULL;
3728 	g_bserrno = -1;
3729 	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
3730 	poll_threads();
3731 	CU_ASSERT(g_bserrno == 0);
3732 	blob_md_ro = NULL;
3733 
3734 	g_blob = NULL;
3735 	g_blobid = SPDK_BLOBID_INVALID;
3736 
3737 	ut_bs_reload(&bs, NULL);
3738 
3739 	g_blob = NULL;
3740 	g_bserrno = 0;
3741 	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
3742 	poll_threads();
3743 	CU_ASSERT(g_bserrno != 0);
3744 	CU_ASSERT(g_blob == NULL);
3745 
3746 	g_blob = NULL;
3747 	g_bserrno = -1;
3748 	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
3749 	poll_threads();
3750 	CU_ASSERT(g_bserrno == 0);
3751 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3752 	blob_data_ro = g_blob;
3753 	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
3754 	CU_ASSERT(blob_data_ro->data_ro == true);
3755 	CU_ASSERT(blob_data_ro->md_ro == true);
3756 	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
3757 
3758 	g_blob = NULL;
3759 	g_bserrno = -1;
3760 	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
3761 	poll_threads();
3762 	CU_ASSERT(g_bserrno == 0);
3763 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3764 	blob_md_ro = g_blob;
3765 	CU_ASSERT(blob_md_ro->data_ro == false);
3766 	CU_ASSERT(blob_md_ro->md_ro == true);
3767 
3768 	g_bserrno = -1;
3769 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3770 	poll_threads();
3771 	CU_ASSERT(g_bserrno == 0);
3772 
3773 	ut_blob_close_and_delete(bs, blob_data_ro);
3774 	ut_blob_close_and_delete(bs, blob_md_ro);
3775 }
3776 
3777 static void
3778 bs_version(void)
3779 {
3780 	struct spdk_bs_super_block *super;
3781 	struct spdk_blob_store *bs = g_bs;
3782 	struct spdk_bs_dev *dev;
3783 	struct spdk_blob *blob;
3784 	struct spdk_blob_opts blob_opts;
3785 	spdk_blob_id blobid;
3786 
3787 	/* Unload the blob store */
3788 	spdk_bs_unload(bs, bs_op_complete, NULL);
3789 	poll_threads();
3790 	CU_ASSERT(g_bserrno == 0);
3791 	g_bs = NULL;
3792 
3793 	/*
3794 	 * Change the bs version on disk.  This will allow us to
3795 	 *  test that the version does not get modified automatically
3796 	 *  when loading and unloading the blobstore.
3797 	 */
3798 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
3799 	CU_ASSERT(super->version == SPDK_BS_VERSION);
3800 	CU_ASSERT(super->clean == 1);
3801 	super->version = 2;
3802 	/*
3803 	 * Version 2 metadata does not have a used blobid mask, so clear
3804 	 *  those fields in the super block and zero the corresponding
3805 	 *  region on "disk".  We will use this to ensure blob IDs are
3806 	 *  correctly reconstructed.
3807 	 */
3808 	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
3809 	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
3810 	super->used_blobid_mask_start = 0;
3811 	super->used_blobid_mask_len = 0;
3812 	super->crc = blob_md_page_calc_crc(super);
3813 
3814 	/* Load an existing blob store */
3815 	dev = init_dev();
3816 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3817 	poll_threads();
3818 	CU_ASSERT(g_bserrno == 0);
3819 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3820 	CU_ASSERT(super->clean == 1);
3821 	bs = g_bs;
3822 
3823 	/*
3824 	 * Create a blob - just to make sure that unloading the blobstore
3825 	 *  results in writing the super block (since metadata pages
3826 	 *  were allocated).
3827 	 */
3828 	ut_spdk_blob_opts_init(&blob_opts);
3829 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3830 	poll_threads();
3831 	CU_ASSERT(g_bserrno == 0);
3832 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3833 	blobid = g_blobid;
3834 
3835 	/* Unload the blob store */
3836 	spdk_bs_unload(bs, bs_op_complete, NULL);
3837 	poll_threads();
3838 	CU_ASSERT(g_bserrno == 0);
3839 	g_bs = NULL;
3840 	CU_ASSERT(super->version == 2);
3841 	CU_ASSERT(super->used_blobid_mask_start == 0);
3842 	CU_ASSERT(super->used_blobid_mask_len == 0);
3843 
3844 	dev = init_dev();
3845 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3846 	poll_threads();
3847 	CU_ASSERT(g_bserrno == 0);
3848 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3849 	bs = g_bs;
3850 
3851 	g_blob = NULL;
3852 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3853 	poll_threads();
3854 	CU_ASSERT(g_bserrno == 0);
3855 	CU_ASSERT(g_blob != NULL);
3856 	blob = g_blob;
3857 
3858 	ut_blob_close_and_delete(bs, blob);
3859 
3860 	CU_ASSERT(super->version == 2);
3861 	CU_ASSERT(super->used_blobid_mask_start == 0);
3862 	CU_ASSERT(super->used_blobid_mask_len == 0);
3863 }
3864 
3865 static void
3866 blob_set_xattrs_test(void)
3867 {
3868 	struct spdk_blob_store *bs = g_bs;
3869 	struct spdk_blob *blob;
3870 	struct spdk_blob_opts opts;
3871 	const void *value;
3872 	size_t value_len;
3873 	char *xattr;
3874 	size_t xattr_length;
3875 	int rc;
3876 
3877 	/* Create blob with extra attributes */
3878 	ut_spdk_blob_opts_init(&opts);
3879 
3880 	opts.xattrs.names = g_xattr_names;
3881 	opts.xattrs.get_value = _get_xattr_value;
3882 	opts.xattrs.count = 3;
3883 	opts.xattrs.ctx = &g_ctx;
3884 
3885 	blob = ut_blob_create_and_open(bs, &opts);
3886 
3887 	/* Get the xattrs */
3888 	value = NULL;
3889 
3890 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
3891 	CU_ASSERT(rc == 0);
3892 	SPDK_CU_ASSERT_FATAL(value != NULL);
3893 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
3894 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
3895 
3896 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
3897 	CU_ASSERT(rc == 0);
3898 	SPDK_CU_ASSERT_FATAL(value != NULL);
3899 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
3900 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
3901 
3902 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
3903 	CU_ASSERT(rc == 0);
3904 	SPDK_CU_ASSERT_FATAL(value != NULL);
3905 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
3906 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
3907 
3908 	/* Try to get a non-existing attribute */
3909 
3910 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
3911 	CU_ASSERT(rc == -ENOENT);
3912 
3913 	/* Try an xattr exceeding the maximum descriptor length within a single page */
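	/* The calculation below exceeds the largest xattr value that fits in a single
	 * metadata page descriptor by one byte, so spdk_blob_set_xattr() is expected
	 * to fail with -ENOMEM. */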
3914 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
3915 		       strlen("large_xattr") + 1;
3916 	xattr = calloc(xattr_length, sizeof(char));
3917 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3918 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3919 	free(xattr);
3920 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
3921 
3922 	spdk_blob_close(blob, blob_op_complete, NULL);
3923 	poll_threads();
3924 	CU_ASSERT(g_bserrno == 0);
3925 	blob = NULL;
3926 	g_blob = NULL;
3927 	g_blobid = SPDK_BLOBID_INVALID;
3928 
3929 	/* NULL callback */
3930 	ut_spdk_blob_opts_init(&opts);
3931 	opts.xattrs.names = g_xattr_names;
3932 	opts.xattrs.get_value = NULL;
3933 	opts.xattrs.count = 1;
3934 	opts.xattrs.ctx = &g_ctx;
3935 
3936 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3937 	poll_threads();
3938 	CU_ASSERT(g_bserrno == -EINVAL);
3939 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3940 
3941 	/* NULL values */
3942 	ut_spdk_blob_opts_init(&opts);
3943 	opts.xattrs.names = g_xattr_names;
3944 	opts.xattrs.get_value = _get_xattr_value_null;
3945 	opts.xattrs.count = 1;
3946 	opts.xattrs.ctx = NULL;
3947 
3948 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3949 	poll_threads();
3950 	CU_ASSERT(g_bserrno == -EINVAL);
3951 }
3952 
3953 static void
3954 blob_thin_prov_alloc(void)
3955 {
3956 	struct spdk_blob_store *bs = g_bs;
3957 	struct spdk_blob *blob;
3958 	struct spdk_blob_opts opts;
3959 	spdk_blob_id blobid;
3960 	uint64_t free_clusters;
3961 
3962 	free_clusters = spdk_bs_free_cluster_count(bs);
3963 
3964 	/* Set blob as thin provisioned */
3965 	ut_spdk_blob_opts_init(&opts);
3966 	opts.thin_provision = true;
3967 
3968 	blob = ut_blob_create_and_open(bs, &opts);
3969 	blobid = spdk_blob_get_id(blob);
3970 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3971 
3972 	CU_ASSERT(blob->active.num_clusters == 0);
3973 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
3974 
3975 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3976 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3977 	poll_threads();
3978 	CU_ASSERT(g_bserrno == 0);
3979 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3980 	CU_ASSERT(blob->active.num_clusters == 5);
3981 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
3982 
3983 	/* Grow it to 1TB - still unallocated */
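	/* 262144 clusters at the default 4 MiB cluster size equals 1 TiB */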
3984 	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
3985 	poll_threads();
3986 	CU_ASSERT(g_bserrno == 0);
3987 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3988 	CU_ASSERT(blob->active.num_clusters == 262144);
3989 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3990 
3991 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3992 	poll_threads();
3993 	CU_ASSERT(g_bserrno == 0);
3994 	/* Sync must not change anything */
3995 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3996 	CU_ASSERT(blob->active.num_clusters == 262144);
3997 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3998 	/* Since clusters are not allocated,
3999 	 * the number of metadata pages is expected to be minimal.
4000 	 */
4001 	CU_ASSERT(blob->active.num_pages == 1);
4002 
4003 	/* Shrink the blob to 3 clusters - still unallocated */
4004 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
4005 	poll_threads();
4006 	CU_ASSERT(g_bserrno == 0);
4007 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4008 	CU_ASSERT(blob->active.num_clusters == 3);
4009 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
4010 
4011 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4012 	poll_threads();
4013 	CU_ASSERT(g_bserrno == 0);
4014 	/* Sync must not change anything */
4015 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4016 	CU_ASSERT(blob->active.num_clusters == 3);
4017 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
4018 
4019 	spdk_blob_close(blob, blob_op_complete, NULL);
4020 	poll_threads();
4021 	CU_ASSERT(g_bserrno == 0);
4022 
4023 	ut_bs_reload(&bs, NULL);
4024 
4025 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4026 	poll_threads();
4027 	CU_ASSERT(g_bserrno == 0);
4028 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4029 	blob = g_blob;
4030 
4031 	/* Check that clusters allocation and size is still the same */
4032 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4033 	CU_ASSERT(blob->active.num_clusters == 3);
4034 
4035 	ut_blob_close_and_delete(bs, blob);
4036 }
4037 
4038 static void
4039 blob_insert_cluster_msg_test(void)
4040 {
4041 	struct spdk_blob_store *bs = g_bs;
4042 	struct spdk_blob *blob;
4043 	struct spdk_blob_opts opts;
4044 	struct spdk_blob_md_page page = {};
4045 	spdk_blob_id blobid;
4046 	uint64_t free_clusters;
4047 	uint64_t new_cluster = 0;
4048 	uint32_t cluster_num = 3;
4049 	uint32_t extent_page = 0;
4050 
4051 	free_clusters = spdk_bs_free_cluster_count(bs);
4052 
4053 	/* Set blob as thin provisioned */
4054 	ut_spdk_blob_opts_init(&opts);
4055 	opts.thin_provision = true;
4056 	opts.num_clusters = 4;
4057 
4058 	blob = ut_blob_create_and_open(bs, &opts);
4059 	blobid = spdk_blob_get_id(blob);
4060 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4061 
4062 	CU_ASSERT(blob->active.num_clusters == 4);
4063 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
4064 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4065 
4066 	/* Specify cluster_num to allocate; new_cluster is returned and then inserted on the md_thread.
4067 	 * This simulates the behaviour when a cluster is allocated after blob creation,
4068 	 * e.g. in _spdk_bs_allocate_and_copy_cluster(). */
4069 	spdk_spin_lock(&bs->used_lock);
4070 	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
4071 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4072 	spdk_spin_unlock(&bs->used_lock);
4073 
4074 	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, &page,
4075 					 blob_op_complete, NULL);
4076 	poll_threads();
4077 
4078 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4079 
4080 	spdk_blob_close(blob, blob_op_complete, NULL);
4081 	poll_threads();
4082 	CU_ASSERT(g_bserrno == 0);
4083 
4084 	ut_bs_reload(&bs, NULL);
4085 
4086 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4087 	poll_threads();
4088 	CU_ASSERT(g_bserrno == 0);
4089 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4090 	blob = g_blob;
4091 
4092 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4093 
4094 	ut_blob_close_and_delete(bs, blob);
4095 }
4096 
4097 static void
4098 blob_thin_prov_rw(void)
4099 {
4100 	static const uint8_t zero[10 * 4096] = { 0 };
4101 	struct spdk_blob_store *bs = g_bs;
4102 	struct spdk_blob *blob, *blob_id0;
4103 	struct spdk_io_channel *channel, *channel_thread1;
4104 	struct spdk_blob_opts opts;
4105 	uint64_t free_clusters;
4106 	uint64_t page_size;
4107 	uint8_t payload_read[10 * 4096];
4108 	uint8_t payload_write[10 * 4096];
4109 	uint64_t write_bytes;
4110 	uint64_t read_bytes;
4111 
4112 	free_clusters = spdk_bs_free_cluster_count(bs);
4113 	page_size = spdk_bs_get_page_size(bs);
4114 
4115 	channel = spdk_bs_alloc_io_channel(bs);
4116 	CU_ASSERT(channel != NULL);
4117 
4118 	ut_spdk_blob_opts_init(&opts);
4119 	opts.thin_provision = true;
4120 
4121 	/* Create and delete a blob at md page 0, so that the next md page allocation
4122 	 * for an extent will reuse it. */
4123 	blob_id0 = ut_blob_create_and_open(bs, &opts);
4124 	blob = ut_blob_create_and_open(bs, &opts);
4125 	ut_blob_close_and_delete(bs, blob_id0);
4126 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4127 
4128 	CU_ASSERT(blob->active.num_clusters == 0);
4129 
4130 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4131 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4132 	poll_threads();
4133 	CU_ASSERT(g_bserrno == 0);
4134 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4135 	CU_ASSERT(blob->active.num_clusters == 5);
4136 
4137 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4138 	poll_threads();
4139 	CU_ASSERT(g_bserrno == 0);
4140 	/* Sync must not change anything */
4141 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4142 	CU_ASSERT(blob->active.num_clusters == 5);
4143 
4144 	/* Payload should be all zeros from unallocated clusters */
4145 	memset(payload_read, 0xFF, sizeof(payload_read));
4146 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4147 	poll_threads();
4148 	CU_ASSERT(g_bserrno == 0);
4149 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4150 
4151 	write_bytes = g_dev_write_bytes;
4152 	read_bytes = g_dev_read_bytes;
4153 
4154 	/* Perform a write on thread 1. That will allocate a cluster on thread 0 via send_msg */
4155 	set_thread(1);
4156 	channel_thread1 = spdk_bs_alloc_io_channel(bs);
4157 	CU_ASSERT(channel_thread1 != NULL);
4158 	memset(payload_write, 0xE5, sizeof(payload_write));
4159 	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
4160 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4161 	/* Perform a write on thread 0. That will try to allocate a cluster,
4162 	 * but fail because the other thread issued the cluster allocation first. */
4163 	set_thread(0);
4164 	memset(payload_write, 0xE5, sizeof(payload_write));
4165 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4166 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
4167 	poll_threads();
4168 	CU_ASSERT(g_bserrno == 0);
4169 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4170 	/* For a thin-provisioned blob we need to write 20 data pages plus one metadata page, and
4171 	 * read 0 bytes */
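	/* The 20 data pages come from the two 10-page writes above (one per thread);
	 * with the extent table enabled one extra extent page is written as well,
	 * hence 22 vs. 21 pages below. */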
4172 	if (g_use_extent_table) {
4173 		/* Add one more page for EXTENT_PAGE write */
4174 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
4175 	} else {
4176 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
4177 	}
4178 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4179 
4180 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4181 	poll_threads();
4182 	CU_ASSERT(g_bserrno == 0);
4183 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4184 
4185 	ut_blob_close_and_delete(bs, blob);
4186 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4187 
4188 	set_thread(1);
4189 	spdk_bs_free_io_channel(channel_thread1);
4190 	set_thread(0);
4191 	spdk_bs_free_io_channel(channel);
4192 	poll_threads();
4193 	g_blob = NULL;
4194 	g_blobid = 0;
4195 }
4196 
4197 static void
4198 blob_thin_prov_write_count_io(void)
4199 {
4200 	struct spdk_blob_store *bs;
4201 	struct spdk_blob *blob;
4202 	struct spdk_io_channel *ch;
4203 	struct spdk_bs_dev *dev;
4204 	struct spdk_bs_opts bs_opts;
4205 	struct spdk_blob_opts opts;
4206 	uint64_t free_clusters;
4207 	uint64_t page_size;
4208 	uint8_t payload_write[4096];
4209 	uint64_t write_bytes;
4210 	uint64_t read_bytes;
4211 	const uint32_t CLUSTER_SZ = 16384;
4212 	uint32_t pages_per_cluster;
4213 	uint32_t pages_per_extent_page;
4214 	uint32_t i;
4215 
4216 	/* Use a very small cluster size for this test.  This ensures we need multiple
4217 	 * extent pages to hold all of the clusters, even for the relatively small blobs
4218 	 * we are restricted to in the unit tests (i.e. we don't want to allocate multi-GB
4219 	 * buffers).
4220 	 */
4221 	dev = init_dev();
4222 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4223 	bs_opts.cluster_sz = CLUSTER_SZ;
4224 
4225 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4226 	poll_threads();
4227 	CU_ASSERT(g_bserrno == 0);
4228 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4229 	bs = g_bs;
4230 
4231 	free_clusters = spdk_bs_free_cluster_count(bs);
4232 	page_size = spdk_bs_get_page_size(bs);
4233 	pages_per_cluster = CLUSTER_SZ / page_size;
4234 	pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster;
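	/* pages_per_extent_page is the number of blob data pages covered by the clusters
	 * tracked by a single extent page. */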
4235 
4236 	ch = spdk_bs_alloc_io_channel(bs);
4237 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4238 
4239 	ut_spdk_blob_opts_init(&opts);
4240 	opts.thin_provision = true;
4241 
4242 	blob = ut_blob_create_and_open(bs, &opts);
4243 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4244 
4245 	/* Resize the blob so that it will require 8 extent pages to hold all of
4246 	 * the clusters.
4247 	 */
4248 	g_bserrno = -1;
4249 	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
4250 	poll_threads();
4251 	CU_ASSERT(g_bserrno == 0);
4252 
4253 	g_bserrno = -1;
4254 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4255 	poll_threads();
4256 	CU_ASSERT(g_bserrno == 0);
4257 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4258 	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);
4259 
4260 	memset(payload_write, 0, sizeof(payload_write));
4261 	for (i = 0; i < 8; i++) {
4262 		write_bytes = g_dev_write_bytes;
4263 		read_bytes = g_dev_read_bytes;
4264 
4265 		g_bserrno = -1;
4266 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
4267 		poll_threads();
4268 		CU_ASSERT(g_bserrno == 0);
4269 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4270 
4271 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4272 		if (!g_use_extent_table) {
4273 			/* For legacy metadata, we should have written two pages - one for the
4274 			 * write I/O itself, another for the blob's primary metadata.
4275 			 */
4276 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4277 		} else {
4278 			/* For extent table metadata, we should have written three pages - one
4279 			 * for the write I/O, one for the extent page, one for the blob's primary
4280 			 * metadata.
4281 			 */
4282 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3);
4283 		}
4284 
4285 		/* The write should have synced the metadata already.  Do another sync here
4286 		 * just to confirm.
4287 		 */
4288 		write_bytes = g_dev_write_bytes;
4289 		read_bytes = g_dev_read_bytes;
4290 
4291 		g_bserrno = -1;
4292 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
4293 		poll_threads();
4294 		CU_ASSERT(g_bserrno == 0);
4295 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4296 
4297 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4298 		CU_ASSERT(g_dev_write_bytes == write_bytes);
4299 
4300 		/* Now write to another unallocated cluster that is part of the same extent page. */
4301 		g_bserrno = -1;
4302 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster,
4303 				   1, blob_op_complete, NULL);
4304 		poll_threads();
4305 		CU_ASSERT(g_bserrno == 0);
4306 		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
4307 
4308 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4309 		/*
4310 		 * For legacy metadata, we should have written the I/O and the primary metadata page.
4311 		 * For extent table metadata, we should have written the I/O and the extent metadata page.
4312 		 */
4313 		CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4314 	}
4315 
4316 	ut_blob_close_and_delete(bs, blob);
4317 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4318 
4319 	spdk_bs_free_io_channel(ch);
4320 	poll_threads();
4321 	g_blob = NULL;
4322 	g_blobid = 0;
4323 
4324 	spdk_bs_unload(bs, bs_op_complete, NULL);
4325 	poll_threads();
4326 	CU_ASSERT(g_bserrno == 0);
4327 	g_bs = NULL;
4328 }
4329 
4330 static void
4331 blob_thin_prov_rle(void)
4332 {
4333 	static const uint8_t zero[10 * 4096] = { 0 };
4334 	struct spdk_blob_store *bs = g_bs;
4335 	struct spdk_blob *blob;
4336 	struct spdk_io_channel *channel;
4337 	struct spdk_blob_opts opts;
4338 	spdk_blob_id blobid;
4339 	uint64_t free_clusters;
4340 	uint64_t page_size;
4341 	uint8_t payload_read[10 * 4096];
4342 	uint8_t payload_write[10 * 4096];
4343 	uint64_t write_bytes;
4344 	uint64_t read_bytes;
4345 	uint64_t io_unit;
4346 
4347 	free_clusters = spdk_bs_free_cluster_count(bs);
4348 	page_size = spdk_bs_get_page_size(bs);
4349 
4350 	ut_spdk_blob_opts_init(&opts);
4351 	opts.thin_provision = true;
4352 	opts.num_clusters = 5;
4353 
4354 	blob = ut_blob_create_and_open(bs, &opts);
4355 	blobid = spdk_blob_get_id(blob);
4356 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4357 
4358 	channel = spdk_bs_alloc_io_channel(bs);
4359 	CU_ASSERT(channel != NULL);
4360 
4361 	/* Specifically target the second cluster in the blob as the first allocation */
4362 	io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);
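	/* Convert cluster index 1 to its first page, then to io units. */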
4363 
4364 	/* Payload should be all zeros from unallocated clusters */
4365 	memset(payload_read, 0xFF, sizeof(payload_read));
4366 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4367 	poll_threads();
4368 	CU_ASSERT(g_bserrno == 0);
4369 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4370 
4371 	write_bytes = g_dev_write_bytes;
4372 	read_bytes = g_dev_read_bytes;
4373 
4374 	/* Issue write to second cluster in a blob */
4375 	memset(payload_write, 0xE5, sizeof(payload_write));
4376 	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
4377 	poll_threads();
4378 	CU_ASSERT(g_bserrno == 0);
4379 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4380 	/* For a thin-provisioned blob we need to write 10 data pages plus one metadata page, and
4381 	 * read 0 bytes */
4382 	if (g_use_extent_table) {
4383 		/* Add one more page for EXTENT_PAGE write */
4384 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
4385 	} else {
4386 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
4387 	}
4388 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4389 
4390 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4391 	poll_threads();
4392 	CU_ASSERT(g_bserrno == 0);
4393 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4394 
4395 	spdk_bs_free_io_channel(channel);
4396 	poll_threads();
4397 
4398 	spdk_blob_close(blob, blob_op_complete, NULL);
4399 	poll_threads();
4400 	CU_ASSERT(g_bserrno == 0);
4401 
4402 	ut_bs_reload(&bs, NULL);
4403 
4404 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4405 	poll_threads();
4406 	CU_ASSERT(g_bserrno == 0);
4407 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4408 	blob = g_blob;
4409 
4410 	channel = spdk_bs_alloc_io_channel(bs);
4411 	CU_ASSERT(channel != NULL);
4412 
4413 	/* Read second cluster after blob reload to confirm data written */
4414 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4415 	poll_threads();
4416 	CU_ASSERT(g_bserrno == 0);
4417 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4418 
4419 	spdk_bs_free_io_channel(channel);
4420 	poll_threads();
4421 
4422 	ut_blob_close_and_delete(bs, blob);
4423 }
4424 
4425 static void
4426 blob_thin_prov_rw_iov(void)
4427 {
4428 	static const uint8_t zero[10 * 4096] = { 0 };
4429 	struct spdk_blob_store *bs = g_bs;
4430 	struct spdk_blob *blob;
4431 	struct spdk_io_channel *channel;
4432 	struct spdk_blob_opts opts;
4433 	uint64_t free_clusters;
4434 	uint8_t payload_read[10 * 4096];
4435 	uint8_t payload_write[10 * 4096];
4436 	struct iovec iov_read[3];
4437 	struct iovec iov_write[3];
4438 
4439 	free_clusters = spdk_bs_free_cluster_count(bs);
4440 
4441 	channel = spdk_bs_alloc_io_channel(bs);
4442 	CU_ASSERT(channel != NULL);
4443 
4444 	ut_spdk_blob_opts_init(&opts);
4445 	opts.thin_provision = true;
4446 
4447 	blob = ut_blob_create_and_open(bs, &opts);
4448 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4449 
4450 	CU_ASSERT(blob->active.num_clusters == 0);
4451 
4452 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4453 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4454 	poll_threads();
4455 	CU_ASSERT(g_bserrno == 0);
4456 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4457 	CU_ASSERT(blob->active.num_clusters == 5);
4458 
4459 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4460 	poll_threads();
4461 	CU_ASSERT(g_bserrno == 0);
4462 	/* Sync must not change anything */
4463 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4464 	CU_ASSERT(blob->active.num_clusters == 5);
4465 
4466 	/* Payload should be all zeros from unallocated clusters */
4467 	memset(payload_read, 0xAA, sizeof(payload_read));
4468 	iov_read[0].iov_base = payload_read;
4469 	iov_read[0].iov_len = 3 * 4096;
4470 	iov_read[1].iov_base = payload_read + 3 * 4096;
4471 	iov_read[1].iov_len = 4 * 4096;
4472 	iov_read[2].iov_base = payload_read + 7 * 4096;
4473 	iov_read[2].iov_len = 3 * 4096;
4474 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4475 	poll_threads();
4476 	CU_ASSERT(g_bserrno == 0);
4477 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4478 
4479 	memset(payload_write, 0xE5, sizeof(payload_write));
4480 	iov_write[0].iov_base = payload_write;
4481 	iov_write[0].iov_len = 1 * 4096;
4482 	iov_write[1].iov_base = payload_write + 1 * 4096;
4483 	iov_write[1].iov_len = 5 * 4096;
4484 	iov_write[2].iov_base = payload_write + 6 * 4096;
4485 	iov_write[2].iov_len = 4 * 4096;
4486 
4487 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4488 	poll_threads();
4489 	CU_ASSERT(g_bserrno == 0);
4490 
4491 	memset(payload_read, 0xAA, sizeof(payload_read));
4492 	iov_read[0].iov_base = payload_read;
4493 	iov_read[0].iov_len = 3 * 4096;
4494 	iov_read[1].iov_base = payload_read + 3 * 4096;
4495 	iov_read[1].iov_len = 4 * 4096;
4496 	iov_read[2].iov_base = payload_read + 7 * 4096;
4497 	iov_read[2].iov_len = 3 * 4096;
4498 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4499 	poll_threads();
4500 	CU_ASSERT(g_bserrno == 0);
4501 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4502 
4503 	spdk_bs_free_io_channel(channel);
4504 	poll_threads();
4505 
4506 	ut_blob_close_and_delete(bs, blob);
4507 }
4508 
4509 struct iter_ctx {
4510 	int		current_iter;
4511 	spdk_blob_id	blobid[4];
4512 };
4513 
4514 static void
4515 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
4516 {
4517 	struct iter_ctx *iter_ctx = arg;
4518 	spdk_blob_id blobid;
4519 
4520 	CU_ASSERT(bserrno == 0);
4521 	blobid = spdk_blob_get_id(blob);
4522 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
4523 }
4524 
4525 static void
4526 bs_load_iter_test(void)
4527 {
4528 	struct spdk_blob_store *bs;
4529 	struct spdk_bs_dev *dev;
4530 	struct iter_ctx iter_ctx = { 0 };
4531 	struct spdk_blob *blob;
4532 	int i, rc;
4533 	struct spdk_bs_opts opts;
4534 
4535 	dev = init_dev();
4536 	spdk_bs_opts_init(&opts, sizeof(opts));
4537 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4538 
4539 	/* Initialize a new blob store */
4540 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
4541 	poll_threads();
4542 	CU_ASSERT(g_bserrno == 0);
4543 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4544 	bs = g_bs;
4545 
4546 	for (i = 0; i < 4; i++) {
4547 		blob = ut_blob_create_and_open(bs, NULL);
4548 		iter_ctx.blobid[i] = spdk_blob_get_id(blob);
4549 
4550 		/* Just save the blobid as an xattr for testing purposes. */
4551 		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
4552 		CU_ASSERT(rc == 0);
4553 
4554 		/* Resize the blob */
4555 		spdk_blob_resize(blob, i, blob_op_complete, NULL);
4556 		poll_threads();
4557 		CU_ASSERT(g_bserrno == 0);
4558 
4559 		spdk_blob_close(blob, blob_op_complete, NULL);
4560 		poll_threads();
4561 		CU_ASSERT(g_bserrno == 0);
4562 	}
4563 
4564 	g_bserrno = -1;
4565 	spdk_bs_unload(bs, bs_op_complete, NULL);
4566 	poll_threads();
4567 	CU_ASSERT(g_bserrno == 0);
4568 
4569 	dev = init_dev();
4570 	spdk_bs_opts_init(&opts, sizeof(opts));
4571 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4572 	opts.iter_cb_fn = test_iter;
4573 	opts.iter_cb_arg = &iter_ctx;
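	/* test_iter() is invoked once per existing blob during load and checks that the
	 * blob IDs are visited in creation order. */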
4574 
4575 	/* Test blob iteration during load after a clean shutdown. */
4576 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4577 	poll_threads();
4578 	CU_ASSERT(g_bserrno == 0);
4579 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4580 	bs = g_bs;
4581 
4582 	/* Dirty shutdown */
4583 	bs_free(bs);
4584 
4585 	dev = init_dev();
4586 	spdk_bs_opts_init(&opts, sizeof(opts));
4587 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4588 	opts.iter_cb_fn = test_iter;
4589 	iter_ctx.current_iter = 0;
4590 	opts.iter_cb_arg = &iter_ctx;
4591 
4592 	/* Test blob iteration during load after a dirty shutdown. */
4593 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4594 	poll_threads();
4595 	CU_ASSERT(g_bserrno == 0);
4596 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4597 	bs = g_bs;
4598 
4599 	spdk_bs_unload(bs, bs_op_complete, NULL);
4600 	poll_threads();
4601 	CU_ASSERT(g_bserrno == 0);
4602 	g_bs = NULL;
4603 }
4604 
4605 static void
4606 blob_snapshot_rw(void)
4607 {
4608 	static const uint8_t zero[10 * 4096] = { 0 };
4609 	struct spdk_blob_store *bs = g_bs;
4610 	struct spdk_blob *blob, *snapshot;
4611 	struct spdk_io_channel *channel;
4612 	struct spdk_blob_opts opts;
4613 	spdk_blob_id blobid, snapshotid;
4614 	uint64_t free_clusters;
4615 	uint64_t cluster_size;
4616 	uint64_t page_size;
4617 	uint8_t payload_read[10 * 4096];
4618 	uint8_t payload_write[10 * 4096];
4619 	uint64_t write_bytes_start;
4620 	uint64_t read_bytes_start;
4621 	uint64_t copy_bytes_start;
4622 	uint64_t write_bytes;
4623 	uint64_t read_bytes;
4624 	uint64_t copy_bytes;
4625 
4626 	free_clusters = spdk_bs_free_cluster_count(bs);
4627 	cluster_size = spdk_bs_get_cluster_size(bs);
4628 	page_size = spdk_bs_get_page_size(bs);
4629 
4630 	channel = spdk_bs_alloc_io_channel(bs);
4631 	CU_ASSERT(channel != NULL);
4632 
4633 	ut_spdk_blob_opts_init(&opts);
4634 	opts.thin_provision = true;
4635 	opts.num_clusters = 5;
4636 
4637 	blob = ut_blob_create_and_open(bs, &opts);
4638 	blobid = spdk_blob_get_id(blob);
4639 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4640 
4641 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4642 
4643 	memset(payload_read, 0xFF, sizeof(payload_read));
4644 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4645 	poll_threads();
4646 	CU_ASSERT(g_bserrno == 0);
4647 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4648 
4649 	memset(payload_write, 0xE5, sizeof(payload_write));
4650 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4651 	poll_threads();
4652 	CU_ASSERT(g_bserrno == 0);
4653 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4654 
4655 	/* Create snapshot from blob */
4656 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4657 	poll_threads();
4658 	CU_ASSERT(g_bserrno == 0);
4659 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4660 	snapshotid = g_blobid;
4661 
4662 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4663 	poll_threads();
4664 	CU_ASSERT(g_bserrno == 0);
4665 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4666 	snapshot = g_blob;
4667 	CU_ASSERT(snapshot->data_ro == true);
4668 	CU_ASSERT(snapshot->md_ro == true);
4669 
4670 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4671 
4672 	write_bytes_start = g_dev_write_bytes;
4673 	read_bytes_start = g_dev_read_bytes;
4674 	copy_bytes_start = g_dev_copy_bytes;
4675 
4676 	memset(payload_write, 0xAA, sizeof(payload_write));
4677 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4678 	poll_threads();
4679 	CU_ASSERT(g_bserrno == 0);
4680 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4681 
4682 	/* For a clone we need to allocate and copy one cluster, update one page of metadata
4683 	 * and then write 10 pages of payload.
4684 	 */
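	/* If the device supports copy offload (g_dev_copy_enabled), the cluster copy is
	 * accounted in copy_bytes; otherwise it is performed as a read plus a write and
	 * shows up in read_bytes and write_bytes instead. */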
4685 	write_bytes = g_dev_write_bytes - write_bytes_start;
4686 	read_bytes = g_dev_read_bytes - read_bytes_start;
4687 	copy_bytes = g_dev_copy_bytes - copy_bytes_start;
4688 	if (g_dev_copy_enabled) {
4689 		CU_ASSERT(copy_bytes == cluster_size);
4690 	} else {
4691 		CU_ASSERT(copy_bytes == 0);
4692 	}
4693 	if (g_use_extent_table) {
4694 		/* Add one more page for EXTENT_PAGE write */
4695 		CU_ASSERT(write_bytes + copy_bytes == page_size * 12 + cluster_size);
4696 	} else {
4697 		CU_ASSERT(write_bytes + copy_bytes == page_size * 11 + cluster_size);
4698 	}
4699 	CU_ASSERT(read_bytes + copy_bytes == cluster_size);
4700 
4701 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4702 	poll_threads();
4703 	CU_ASSERT(g_bserrno == 0);
4704 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4705 
4706 	/* Data on snapshot should not change after write to clone */
4707 	memset(payload_write, 0xE5, sizeof(payload_write));
4708 	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
4709 	poll_threads();
4710 	CU_ASSERT(g_bserrno == 0);
4711 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4712 
4713 	ut_blob_close_and_delete(bs, blob);
4714 	ut_blob_close_and_delete(bs, snapshot);
4715 
4716 	spdk_bs_free_io_channel(channel);
4717 	poll_threads();
4718 	g_blob = NULL;
4719 	g_blobid = 0;
4720 }
4721 
4722 static void
4723 blob_snapshot_rw_iov(void)
4724 {
4725 	static const uint8_t zero[10 * 4096] = { 0 };
4726 	struct spdk_blob_store *bs = g_bs;
4727 	struct spdk_blob *blob, *snapshot;
4728 	struct spdk_io_channel *channel;
4729 	struct spdk_blob_opts opts;
4730 	spdk_blob_id blobid, snapshotid;
4731 	uint64_t free_clusters;
4732 	uint8_t payload_read[10 * 4096];
4733 	uint8_t payload_write[10 * 4096];
4734 	struct iovec iov_read[3];
4735 	struct iovec iov_write[3];
4736 
4737 	free_clusters = spdk_bs_free_cluster_count(bs);
4738 
4739 	channel = spdk_bs_alloc_io_channel(bs);
4740 	CU_ASSERT(channel != NULL);
4741 
4742 	ut_spdk_blob_opts_init(&opts);
4743 	opts.thin_provision = true;
4744 	opts.num_clusters = 5;
4745 
4746 	blob = ut_blob_create_and_open(bs, &opts);
4747 	blobid = spdk_blob_get_id(blob);
4748 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4749 
4750 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4751 
4752 	/* Create snapshot from blob */
4753 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4754 	poll_threads();
4755 	CU_ASSERT(g_bserrno == 0);
4756 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4757 	snapshotid = g_blobid;
4758 
4759 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4760 	poll_threads();
4761 	CU_ASSERT(g_bserrno == 0);
4762 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4763 	snapshot = g_blob;
4764 	CU_ASSERT(snapshot->data_ro == true);
4765 	CU_ASSERT(snapshot->md_ro == true);
4766 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4767 
4768 	/* Payload should be all zeros from unallocated clusters */
4769 	memset(payload_read, 0xAA, sizeof(payload_read));
4770 	iov_read[0].iov_base = payload_read;
4771 	iov_read[0].iov_len = 3 * 4096;
4772 	iov_read[1].iov_base = payload_read + 3 * 4096;
4773 	iov_read[1].iov_len = 4 * 4096;
4774 	iov_read[2].iov_base = payload_read + 7 * 4096;
4775 	iov_read[2].iov_len = 3 * 4096;
4776 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4777 	poll_threads();
4778 	CU_ASSERT(g_bserrno == 0);
4779 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4780 
4781 	memset(payload_write, 0xE5, sizeof(payload_write));
4782 	iov_write[0].iov_base = payload_write;
4783 	iov_write[0].iov_len = 1 * 4096;
4784 	iov_write[1].iov_base = payload_write + 1 * 4096;
4785 	iov_write[1].iov_len = 5 * 4096;
4786 	iov_write[2].iov_base = payload_write + 6 * 4096;
4787 	iov_write[2].iov_len = 4 * 4096;
4788 
4789 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4790 	poll_threads();
4791 	CU_ASSERT(g_bserrno == 0);
4792 
4793 	memset(payload_read, 0xAA, sizeof(payload_read));
4794 	iov_read[0].iov_base = payload_read;
4795 	iov_read[0].iov_len = 3 * 4096;
4796 	iov_read[1].iov_base = payload_read + 3 * 4096;
4797 	iov_read[1].iov_len = 4 * 4096;
4798 	iov_read[2].iov_base = payload_read + 7 * 4096;
4799 	iov_read[2].iov_len = 3 * 4096;
4800 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4801 	poll_threads();
4802 	CU_ASSERT(g_bserrno == 0);
4803 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4804 
4805 	spdk_bs_free_io_channel(channel);
4806 	poll_threads();
4807 
4808 	ut_blob_close_and_delete(bs, blob);
4809 	ut_blob_close_and_delete(bs, snapshot);
4810 }
4811 
4812 /**
4813  * Inflate / decouple parent rw unit tests.
4814  *
4815  * --------------
4816  * original blob:         0         1         2         3         4
4817  *                   ,---------+---------+---------+---------+---------.
4818  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4819  *                   +---------+---------+---------+---------+---------+
4820  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
4821  *                   +---------+---------+---------+---------+---------+
4822  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
4823  *                   '---------+---------+---------+---------+---------'
4824  *                   .         .         .         .         .         .
4825  * --------          .         .         .         .         .         .
4826  * inflate:          .         .         .         .         .         .
4827  *                   ,---------+---------+---------+---------+---------.
4828  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
4829  *                   '---------+---------+---------+---------+---------'
4830  *
4831  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
4832  *               on snapshot2 and snapshot removed .         .         .
4833  *                   .         .         .         .         .         .
4834  * ----------------  .         .         .         .         .         .
4835  * decouple parent:  .         .         .         .         .         .
4836  *                   ,---------+---------+---------+---------+---------.
4837  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4838  *                   +---------+---------+---------+---------+---------+
4839  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
4840  *                   '---------+---------+---------+---------+---------'
4841  *
4842  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
4843  *               on snapshot2 removed and on snapshot still exists. Snapshot2
4844  *               should remain a clone of snapshot.
4845  */
4846 static void
4847 _blob_inflate_rw(bool decouple_parent)
4848 {
4849 	struct spdk_blob_store *bs = g_bs;
4850 	struct spdk_blob *blob, *snapshot, *snapshot2;
4851 	struct spdk_io_channel *channel;
4852 	struct spdk_blob_opts opts;
4853 	spdk_blob_id blobid, snapshotid, snapshot2id;
4854 	uint64_t free_clusters;
4855 	uint64_t cluster_size;
4856 
4857 	uint64_t payload_size;
4858 	uint8_t *payload_read;
4859 	uint8_t *payload_write;
4860 	uint8_t *payload_clone;
4861 
4862 	uint64_t pages_per_cluster;
4863 	uint64_t pages_per_payload;
4864 
4865 	int i;
4866 	spdk_blob_id ids[2];
4867 	size_t count;
4868 
4869 	free_clusters = spdk_bs_free_cluster_count(bs);
4870 	cluster_size = spdk_bs_get_cluster_size(bs);
4871 	pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
4872 	pages_per_payload = pages_per_cluster * 5;
4873 
4874 	payload_size = cluster_size * 5;
4875 
4876 	payload_read = malloc(payload_size);
4877 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
4878 
4879 	payload_write = malloc(payload_size);
4880 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
4881 
4882 	payload_clone = malloc(payload_size);
4883 	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
4884 
4885 	channel = spdk_bs_alloc_io_channel(bs);
4886 	SPDK_CU_ASSERT_FATAL(channel != NULL);
4887 
4888 	/* Create blob */
4889 	ut_spdk_blob_opts_init(&opts);
4890 	opts.thin_provision = true;
4891 	opts.num_clusters = 5;
4892 
4893 	blob = ut_blob_create_and_open(bs, &opts);
4894 	blobid = spdk_blob_get_id(blob);
4895 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4896 
4897 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4898 
4899 	/* 1) Initial read should return zeroed payload */
4900 	memset(payload_read, 0xFF, payload_size);
4901 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4902 			  blob_op_complete, NULL);
4903 	poll_threads();
4904 	CU_ASSERT(g_bserrno == 0);
4905 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
4906 
4907 	/* Fill the whole blob with a pattern, except the last cluster (to be sure it
4908 	 * isn't allocated) */
4909 	memset(payload_write, 0xE5, payload_size - cluster_size);
4910 	spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
4911 			   pages_per_cluster, blob_op_complete, NULL);
4912 	poll_threads();
4913 	CU_ASSERT(g_bserrno == 0);
4914 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4915 
4916 	/* 2) Create snapshot from blob (first level) */
4917 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4918 	poll_threads();
4919 	CU_ASSERT(g_bserrno == 0);
4920 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4921 	snapshotid = g_blobid;
4922 
4923 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4924 	poll_threads();
4925 	CU_ASSERT(g_bserrno == 0);
4926 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4927 	snapshot = g_blob;
4928 	CU_ASSERT(snapshot->data_ro == true);
4929 	CU_ASSERT(snapshot->md_ro == true);
4930 
4931 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4932 
4933 	/* Write every second cluster with a pattern.
4934 	 *
4935 	 * The last cluster shouldn't be written, to be sure that neither the
4936 	 * snapshot nor the clone allocates it.
4937 	 *
4938 	 * payload_clone stores the expected result of reading "blob" at this point
4939 	 * and is used only to check data consistency on the clone before and after
4940 	 * inflation. Initially we fill it with the backing snapshot's pattern
4941 	 * written before.
4942 	 */
4943 	memset(payload_clone, 0xE5, payload_size - cluster_size);
4944 	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
4945 	memset(payload_write, 0xAA, payload_size);
4946 	for (i = 1; i < 5; i += 2) {
4947 		spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
4948 				   pages_per_cluster, blob_op_complete, NULL);
4949 		poll_threads();
4950 		CU_ASSERT(g_bserrno == 0);
4951 
4952 		/* Update expected result */
4953 		memcpy(payload_clone + (cluster_size * i), payload_write,
4954 		       cluster_size);
4955 	}
4956 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4957 
4958 	/* Check data consistency on clone */
4959 	memset(payload_read, 0xFF, payload_size);
4960 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4961 			  blob_op_complete, NULL);
4962 	poll_threads();
4963 	CU_ASSERT(g_bserrno == 0);
4964 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4965 
4966 	/* 3) Create a second level snapshot from the blob */
4967 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4968 	poll_threads();
4969 	CU_ASSERT(g_bserrno == 0);
4970 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4971 	snapshot2id = g_blobid;
4972 
4973 	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
4974 	poll_threads();
4975 	CU_ASSERT(g_bserrno == 0);
4976 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4977 	snapshot2 = g_blob;
4978 	CU_ASSERT(snapshot2->data_ro == true);
4979 	CU_ASSERT(snapshot2->md_ro == true);
4980 
4981 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);
4982 
4983 	CU_ASSERT(snapshot2->parent_id == snapshotid);
4984 
4985 	/* Write one cluster on the top-level blob. This cluster (1) covers an
4986 	 * already allocated cluster in snapshot2, so it shouldn't be inflated
4987 	 * at all */
4988 	spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
4989 			   pages_per_cluster, blob_op_complete, NULL);
4990 	poll_threads();
4991 	CU_ASSERT(g_bserrno == 0);
4992 
4993 	/* Update expected result */
4994 	memcpy(payload_clone + cluster_size, payload_write, cluster_size);
4995 
4996 	/* Check data consistency on clone */
4997 	memset(payload_read, 0xFF, payload_size);
4998 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4999 			  blob_op_complete, NULL);
5000 	poll_threads();
5001 	CU_ASSERT(g_bserrno == 0);
5002 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
5003 
5004 
5005 	/* Close all blobs */
5006 	spdk_blob_close(blob, blob_op_complete, NULL);
5007 	poll_threads();
5008 	CU_ASSERT(g_bserrno == 0);
5009 
5010 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5011 	poll_threads();
5012 	CU_ASSERT(g_bserrno == 0);
5013 
5014 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5015 	poll_threads();
5016 	CU_ASSERT(g_bserrno == 0);
5017 
5018 	/* Check snapshot-clone relations */
5019 	count = 2;
5020 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
5021 	CU_ASSERT(count == 1);
5022 	CU_ASSERT(ids[0] == snapshot2id);
5023 
5024 	count = 2;
5025 	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5026 	CU_ASSERT(count == 1);
5027 	CU_ASSERT(ids[0] == blobid);
5028 
5029 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
5030 
5031 	free_clusters = spdk_bs_free_cluster_count(bs);
5032 	if (!decouple_parent) {
5033 		/* Do full blob inflation */
5034 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
5035 		poll_threads();
5036 		CU_ASSERT(g_bserrno == 0);
5037 
5038 		/* All clusters should be inflated (except the one already
5039 		 * allocated in the top-level blob) */
5040 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
5041 
5042 		/* Check if relation tree updated correctly */
5043 		count = 2;
5044 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
5045 
5046 		/* snapshotid has one clone */
5047 		CU_ASSERT(count == 1);
5048 		CU_ASSERT(ids[0] == snapshot2id);
5049 
5050 		/* snapshot2id has no clones */
5051 		count = 2;
5052 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5053 		CU_ASSERT(count == 0);
5054 
5055 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5056 	} else {
5057 		/* Decouple parent of blob */
5058 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5059 		poll_threads();
5060 		CU_ASSERT(g_bserrno == 0);
5061 
5062 		/* Only one cluster from the parent should be inflated (the second
5063 		 * one is covered by a cluster already written, and thus allocated,
5064 		 * on the top-level blob) */
5065 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
5066 
5067 		/* Check if relation tree updated correctly */
5068 		count = 2;
5069 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
5070 
5071 		/* snapshotid has two clones now */
5072 		CU_ASSERT(count == 2);
5073 		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5074 		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
5075 
5076 		/* snapshot2id has no clones */
5077 		count = 2;
5078 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5079 		CU_ASSERT(count == 0);
5080 
5081 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5082 	}
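	/* In short: full inflation copies data from the whole parent chain and leaves
	 * the blob without a parent, while decoupling only copies clusters owned by
	 * the immediate parent and re-parents the blob to its grandparent
	 * (here snapshotid), as verified above. */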
5083 
5084 	/* Try to delete snapshot2 (should pass) */
5085 	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
5086 	poll_threads();
5087 	CU_ASSERT(g_bserrno == 0);
5088 
5089 	/* Try to delete base snapshot */
5090 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5091 	poll_threads();
5092 	CU_ASSERT(g_bserrno == 0);
5093 
5094 	/* Reopen blob after snapshot deletion */
5095 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5096 	poll_threads();
5097 	CU_ASSERT(g_bserrno == 0);
5098 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5099 	blob = g_blob;
5100 
5101 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
5102 
5103 	/* Check data consistency on inflated blob */
5104 	memset(payload_read, 0xFF, payload_size);
5105 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
5106 			  blob_op_complete, NULL);
5107 	poll_threads();
5108 	CU_ASSERT(g_bserrno == 0);
5109 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
5110 
5111 	spdk_bs_free_io_channel(channel);
5112 	poll_threads();
5113 
5114 	free(payload_read);
5115 	free(payload_write);
5116 	free(payload_clone);
5117 
5118 	ut_blob_close_and_delete(bs, blob);
5119 }
5120 
5121 static void
5122 blob_inflate_rw(void)
5123 {
5124 	_blob_inflate_rw(false);
5125 	_blob_inflate_rw(true);
5126 }
5127 
5128 /**
5129  * Snapshot-clones relation test
5130  *
5131  *         snapshot
5132  *            |
5133  *      +-----+-----+
5134  *      |           |
5135  *   blob(ro)   snapshot2
5136  *      |           |
5137  *   clone2      clone
5138  */
5139 static void
5140 blob_relations(void)
5141 {
5142 	struct spdk_blob_store *bs;
5143 	struct spdk_bs_dev *dev;
5144 	struct spdk_bs_opts bs_opts;
5145 	struct spdk_blob_opts opts;
5146 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
5147 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
5148 	int rc;
5149 	size_t count;
5150 	spdk_blob_id ids[10] = {};
5151 
5152 	dev = init_dev();
5153 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5154 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5155 
5156 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5157 	poll_threads();
5158 	CU_ASSERT(g_bserrno == 0);
5159 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5160 	bs = g_bs;
5161 
5162 	/* 1. Create blob with 10 clusters */
5163 
5164 	ut_spdk_blob_opts_init(&opts);
5165 	opts.num_clusters = 10;
5166 
5167 	blob = ut_blob_create_and_open(bs, &opts);
5168 	blobid = spdk_blob_get_id(blob);
5169 
5170 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5171 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5172 	CU_ASSERT(!spdk_blob_is_clone(blob));
5173 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
5174 
5175 	/* blob should have neither an underlying snapshot nor any clones */
5176 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
5177 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5178 	count = SPDK_COUNTOF(ids);
5179 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5180 	CU_ASSERT(rc == 0);
5181 	CU_ASSERT(count == 0);
5182 
5183 
5184 	/* 2. Create snapshot */
5185 
5186 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5187 	poll_threads();
5188 	CU_ASSERT(g_bserrno == 0);
5189 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5190 	snapshotid = g_blobid;
5191 
5192 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5193 	poll_threads();
5194 	CU_ASSERT(g_bserrno == 0);
5195 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5196 	snapshot = g_blob;
5197 
5198 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
5199 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
5200 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
5201 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
5202 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5203 
5204 	/* Check if the original blob was converted into a clone of the snapshot */
5205 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5206 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5207 	CU_ASSERT(spdk_blob_is_clone(blob));
5208 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5209 	CU_ASSERT(blob->parent_id == snapshotid);
5210 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5211 
5212 	count = SPDK_COUNTOF(ids);
5213 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5214 	CU_ASSERT(rc == 0);
5215 	CU_ASSERT(count == 1);
5216 	CU_ASSERT(ids[0] == blobid);
5217 
5218 
5219 	/* 3. Create clone from snapshot */
5220 
5221 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
5222 	poll_threads();
5223 	CU_ASSERT(g_bserrno == 0);
5224 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5225 	cloneid = g_blobid;
5226 
5227 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5228 	poll_threads();
5229 	CU_ASSERT(g_bserrno == 0);
5230 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5231 	clone = g_blob;
5232 
5233 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5234 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5235 	CU_ASSERT(spdk_blob_is_clone(clone));
5236 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5237 	CU_ASSERT(clone->parent_id == snapshotid);
5238 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
5239 
5240 	count = SPDK_COUNTOF(ids);
5241 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5242 	CU_ASSERT(rc == 0);
5243 	CU_ASSERT(count == 0);
5244 
5245 	/* Check if clone is on the snapshot's list */
5246 	count = SPDK_COUNTOF(ids);
5247 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5248 	CU_ASSERT(rc == 0);
5249 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5250 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5251 
5252 
5253 	/* 4. Create snapshot of the clone */
5254 
5255 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5256 	poll_threads();
5257 	CU_ASSERT(g_bserrno == 0);
5258 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5259 	snapshotid2 = g_blobid;
5260 
5261 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5262 	poll_threads();
5263 	CU_ASSERT(g_bserrno == 0);
5264 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5265 	snapshot2 = g_blob;
5266 
5267 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
5268 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
5269 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5270 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5271 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5272 
5273 	/* Check if clone is converted into a clone of snapshot2 and snapshot2
5274 	 * is a child of snapshot */
5275 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5276 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5277 	CU_ASSERT(spdk_blob_is_clone(clone));
5278 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5279 	CU_ASSERT(clone->parent_id == snapshotid2);
5280 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5281 
5282 	count = SPDK_COUNTOF(ids);
5283 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5284 	CU_ASSERT(rc == 0);
5285 	CU_ASSERT(count == 1);
5286 	CU_ASSERT(ids[0] == cloneid);
5287 
5288 
5289 	/* 5. Try to create clone from read only blob */
5290 
5291 	/* Mark blob as read only */
5292 	spdk_blob_set_read_only(blob);
5293 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5294 	poll_threads();
5295 	CU_ASSERT(g_bserrno == 0);
5296 
5297 	/* Check if the previously created blob is a read-only clone */
5298 	CU_ASSERT(spdk_blob_is_read_only(blob));
5299 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5300 	CU_ASSERT(spdk_blob_is_clone(blob));
5301 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5302 
5303 	/* Create clone from read only blob */
5304 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5305 	poll_threads();
5306 	CU_ASSERT(g_bserrno == 0);
5307 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5308 	cloneid2 = g_blobid;
5309 
5310 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5311 	poll_threads();
5312 	CU_ASSERT(g_bserrno == 0);
5313 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5314 	clone2 = g_blob;
5315 
5316 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5317 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5318 	CU_ASSERT(spdk_blob_is_clone(clone2));
5319 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5320 
5321 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5322 
5323 	count = SPDK_COUNTOF(ids);
5324 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5325 	CU_ASSERT(rc == 0);
5326 
5327 	CU_ASSERT(count == 1);
5328 	CU_ASSERT(ids[0] == cloneid2);
5329 
5330 	/* Close blobs */
5331 
5332 	spdk_blob_close(clone2, blob_op_complete, NULL);
5333 	poll_threads();
5334 	CU_ASSERT(g_bserrno == 0);
5335 
5336 	spdk_blob_close(blob, blob_op_complete, NULL);
5337 	poll_threads();
5338 	CU_ASSERT(g_bserrno == 0);
5339 
5340 	spdk_blob_close(clone, blob_op_complete, NULL);
5341 	poll_threads();
5342 	CU_ASSERT(g_bserrno == 0);
5343 
5344 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5345 	poll_threads();
5346 	CU_ASSERT(g_bserrno == 0);
5347 
5348 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5349 	poll_threads();
5350 	CU_ASSERT(g_bserrno == 0);
5351 
5352 	/* Try to delete snapshot with more than 1 clone */
5353 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5354 	poll_threads();
5355 	CU_ASSERT(g_bserrno != 0);
5356 
5357 	ut_bs_reload(&bs, &bs_opts);
5358 
5359 	/* A NULL ids array should return the number of clones in count */
5360 	count = SPDK_COUNTOF(ids);
5361 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5362 	CU_ASSERT(rc == -ENOMEM);
5363 	CU_ASSERT(count == 2);
5364 
5365 	/* incorrect array size */
5366 	count = 1;
5367 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5368 	CU_ASSERT(rc == -ENOMEM);
5369 	CU_ASSERT(count == 2);
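	/* A minimal usage sketch of the two-pass pattern exercised above (illustrative
	 * only, not executed by this test; the caller owns the array):
	 *
	 *	size_t n = 0;
	 *
	 *	if (spdk_blob_get_clones(bs, snapshotid, NULL, &n) == -ENOMEM) {
	 *		spdk_blob_id *buf = calloc(n, sizeof(*buf));
	 *
	 *		spdk_blob_get_clones(bs, snapshotid, buf, &n);
	 *		free(buf);
	 *	}
	 */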
5370 
5371 
5372 	/* Verify structure of loaded blob store */
5373 
5374 	/* snapshot */
5375 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5376 
5377 	count = SPDK_COUNTOF(ids);
5378 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5379 	CU_ASSERT(rc == 0);
5380 	CU_ASSERT(count == 2);
5381 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5382 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5383 
5384 	/* blob */
5385 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5386 	count = SPDK_COUNTOF(ids);
5387 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5388 	CU_ASSERT(rc == 0);
5389 	CU_ASSERT(count == 1);
5390 	CU_ASSERT(ids[0] == cloneid2);
5391 
5392 	/* clone */
5393 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5394 	count = SPDK_COUNTOF(ids);
5395 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5396 	CU_ASSERT(rc == 0);
5397 	CU_ASSERT(count == 0);
5398 
5399 	/* snapshot2 */
5400 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5401 	count = SPDK_COUNTOF(ids);
5402 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5403 	CU_ASSERT(rc == 0);
5404 	CU_ASSERT(count == 1);
5405 	CU_ASSERT(ids[0] == cloneid);
5406 
5407 	/* clone2 */
5408 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5409 	count = SPDK_COUNTOF(ids);
5410 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5411 	CU_ASSERT(rc == 0);
5412 	CU_ASSERT(count == 0);
5413 
5414 	/* Try to delete a blob that the user should not be able to remove */
5415 
5416 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5417 	poll_threads();
5418 	CU_ASSERT(g_bserrno != 0);
5419 
5420 	/* Remove all blobs */
5421 
5422 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5423 	poll_threads();
5424 	CU_ASSERT(g_bserrno == 0);
5425 
5426 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5427 	poll_threads();
5428 	CU_ASSERT(g_bserrno == 0);
5429 
5430 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5431 	poll_threads();
5432 	CU_ASSERT(g_bserrno == 0);
5433 
5434 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5435 	poll_threads();
5436 	CU_ASSERT(g_bserrno == 0);
5437 
5438 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5439 	poll_threads();
5440 	CU_ASSERT(g_bserrno == 0);
5441 
5442 	spdk_bs_unload(bs, bs_op_complete, NULL);
5443 	poll_threads();
5444 	CU_ASSERT(g_bserrno == 0);
5445 
5446 	g_bs = NULL;
5447 }
5448 
5449 /**
5450  * Snapshot-clones relation test 2
5451  *
5452  *         snapshot1
5453  *            |
5454  *         snapshot2
5455  *            |
5456  *      +-----+-----+
5457  *      |           |
5458  *   blob(ro)   snapshot3
5459  *      |           |
5460  *      |       snapshot4
5461  *      |        |     |
5462  *   clone2   clone  clone3
5463  */
5464 static void
5465 blob_relations2(void)
5466 {
5467 	struct spdk_blob_store *bs;
5468 	struct spdk_bs_dev *dev;
5469 	struct spdk_bs_opts bs_opts;
5470 	struct spdk_blob_opts opts;
5471 	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
5472 	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
5473 		     cloneid3;
5474 	int rc;
5475 	size_t count;
5476 	spdk_blob_id ids[10] = {};
5477 
5478 	dev = init_dev();
5479 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5480 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5481 
5482 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5483 	poll_threads();
5484 	CU_ASSERT(g_bserrno == 0);
5485 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5486 	bs = g_bs;
5487 
5488 	/* 1. Create blob with 10 clusters */
5489 
5490 	ut_spdk_blob_opts_init(&opts);
5491 	opts.num_clusters = 10;
5492 
5493 	blob = ut_blob_create_and_open(bs, &opts);
5494 	blobid = spdk_blob_get_id(blob);
5495 
5496 	/* 2. Create snapshot1 */
5497 
5498 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5499 	poll_threads();
5500 	CU_ASSERT(g_bserrno == 0);
5501 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5502 	snapshotid1 = g_blobid;
5503 
5504 	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
5505 	poll_threads();
5506 	CU_ASSERT(g_bserrno == 0);
5507 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5508 	snapshot1 = g_blob;
5509 
5510 	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
5511 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
5512 
5516 	/* Check if blob is the clone of snapshot1 */
5517 	CU_ASSERT(blob->parent_id == snapshotid1);
5518 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5519 
5520 	count = SPDK_COUNTOF(ids);
5521 	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
5522 	CU_ASSERT(rc == 0);
5523 	CU_ASSERT(count == 1);
5524 	CU_ASSERT(ids[0] == blobid);
5525 
5526 	/* 3. Create another snapshot */
5527 
5528 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5529 	poll_threads();
5530 	CU_ASSERT(g_bserrno == 0);
5531 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5532 	snapshotid2 = g_blobid;
5533 
5534 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5535 	poll_threads();
5536 	CU_ASSERT(g_bserrno == 0);
5537 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5538 	snapshot2 = g_blob;
5539 
5540 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5541 	CU_ASSERT(snapshot2->parent_id == snapshotid1);
5542 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
5543 
5544 	/* Check if snapshot2 is a clone of snapshot1 and blob
5545 	 * is a child of snapshot2 */
5546 	CU_ASSERT(blob->parent_id == snapshotid2);
5547 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5548 
5549 	count = SPDK_COUNTOF(ids);
5550 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5551 	CU_ASSERT(rc == 0);
5552 	CU_ASSERT(count == 1);
5553 	CU_ASSERT(ids[0] == blobid);
5554 
5555 	/* 4. Create clone from snapshot */
5556 
5557 	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
5558 	poll_threads();
5559 	CU_ASSERT(g_bserrno == 0);
5560 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5561 	cloneid = g_blobid;
5562 
5563 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5564 	poll_threads();
5565 	CU_ASSERT(g_bserrno == 0);
5566 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5567 	clone = g_blob;
5568 
5569 	CU_ASSERT(clone->parent_id == snapshotid2);
5570 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5571 
5572 	/* Check if clone is on the snapshot's list */
5573 	count = SPDK_COUNTOF(ids);
5574 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5575 	CU_ASSERT(rc == 0);
5576 	CU_ASSERT(count == 2);
5577 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5578 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5579 
5580 	/* 5. Create snapshot of the clone */
5581 
5582 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5583 	poll_threads();
5584 	CU_ASSERT(g_bserrno == 0);
5585 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5586 	snapshotid3 = g_blobid;
5587 
5588 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5589 	poll_threads();
5590 	CU_ASSERT(g_bserrno == 0);
5591 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5592 	snapshot3 = g_blob;
5593 
5594 	CU_ASSERT(snapshot3->parent_id == snapshotid2);
5595 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5596 
5597 	/* Check if clone is converted into a clone of snapshot3 and snapshot3
5598 	 * is a child of snapshot2 */
5599 	CU_ASSERT(clone->parent_id == snapshotid3);
5600 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5601 
5602 	count = SPDK_COUNTOF(ids);
5603 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5604 	CU_ASSERT(rc == 0);
5605 	CU_ASSERT(count == 1);
5606 	CU_ASSERT(ids[0] == cloneid);
5607 
5608 	/* 6. Create another snapshot of the clone */
5609 
5610 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5611 	poll_threads();
5612 	CU_ASSERT(g_bserrno == 0);
5613 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5614 	snapshotid4 = g_blobid;
5615 
5616 	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
5617 	poll_threads();
5618 	CU_ASSERT(g_bserrno == 0);
5619 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5620 	snapshot4 = g_blob;
5621 
5622 	CU_ASSERT(snapshot4->parent_id == snapshotid3);
5623 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
5624 
5625 	/* Check if clone is converted into a clone of snapshot4 and snapshot4
5626 	 * is a child of snapshot3 */
5627 	CU_ASSERT(clone->parent_id == snapshotid4);
5628 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
5629 
5630 	count = SPDK_COUNTOF(ids);
5631 	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
5632 	CU_ASSERT(rc == 0);
5633 	CU_ASSERT(count == 1);
5634 	CU_ASSERT(ids[0] == cloneid);
5635 
5636 	/* 7. Remove snapshot 4 */
5637 
5638 	ut_blob_close_and_delete(bs, snapshot4);
5639 
5640 	/* Check if relations are back to state from before creating snapshot 4 */
5641 	CU_ASSERT(clone->parent_id == snapshotid3);
5642 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5643 
5644 	count = SPDK_COUNTOF(ids);
5645 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5646 	CU_ASSERT(rc == 0);
5647 	CU_ASSERT(count == 1);
5648 	CU_ASSERT(ids[0] == cloneid);
5649 
5650 	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
5651 
5652 	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
5653 	poll_threads();
5654 	CU_ASSERT(g_bserrno == 0);
5655 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5656 	cloneid3 = g_blobid;
5657 
5658 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5659 	poll_threads();
5660 	CU_ASSERT(g_bserrno != 0);
5661 
5662 	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
5663 
5664 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5665 	poll_threads();
5666 	CU_ASSERT(g_bserrno == 0);
5667 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5668 	snapshot3 = g_blob;
5669 
5670 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5671 	poll_threads();
5672 	CU_ASSERT(g_bserrno != 0);
5673 
5674 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5675 	poll_threads();
5676 	CU_ASSERT(g_bserrno == 0);
5677 
5678 	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
5679 	poll_threads();
5680 	CU_ASSERT(g_bserrno == 0);
5681 
5682 	/* 10. Remove snapshot 1 */
5683 
5684 	ut_blob_close_and_delete(bs, snapshot1);
5685 
5686 	/* After removing snapshot 1, snapshot2 becomes the root and keeps both of its clones */
5687 	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
5688 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5689 
5690 	count = SPDK_COUNTOF(ids);
5691 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5692 	CU_ASSERT(rc == 0);
5693 	CU_ASSERT(count == 2);
5694 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5695 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5696 
5697 	/* 11. Try to create clone from read only blob */
5698 
5699 	/* Mark blob as read only */
5700 	spdk_blob_set_read_only(blob);
5701 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5702 	poll_threads();
5703 	CU_ASSERT(g_bserrno == 0);
5704 
5705 	/* Create clone from read only blob */
5706 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5707 	poll_threads();
5708 	CU_ASSERT(g_bserrno == 0);
5709 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5710 	cloneid2 = g_blobid;
5711 
5712 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5713 	poll_threads();
5714 	CU_ASSERT(g_bserrno == 0);
5715 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5716 	clone2 = g_blob;
5717 
5718 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5719 
5720 	count = SPDK_COUNTOF(ids);
5721 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5722 	CU_ASSERT(rc == 0);
5723 	CU_ASSERT(count == 1);
5724 	CU_ASSERT(ids[0] == cloneid2);
5725 
5726 	/* Close blobs */
5727 
5728 	spdk_blob_close(clone2, blob_op_complete, NULL);
5729 	poll_threads();
5730 	CU_ASSERT(g_bserrno == 0);
5731 
5732 	spdk_blob_close(blob, blob_op_complete, NULL);
5733 	poll_threads();
5734 	CU_ASSERT(g_bserrno == 0);
5735 
5736 	spdk_blob_close(clone, blob_op_complete, NULL);
5737 	poll_threads();
5738 	CU_ASSERT(g_bserrno == 0);
5739 
5740 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5741 	poll_threads();
5742 	CU_ASSERT(g_bserrno == 0);
5743 
5744 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5745 	poll_threads();
5746 	CU_ASSERT(g_bserrno == 0);
5747 
5748 	ut_bs_reload(&bs, &bs_opts);
5749 
5750 	/* Verify structure of loaded blob store */
5751 
5752 	/* snapshot2 */
5753 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5754 
5755 	count = SPDK_COUNTOF(ids);
5756 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5757 	CU_ASSERT(rc == 0);
5758 	CU_ASSERT(count == 2);
5759 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5760 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5761 
5762 	/* blob */
5763 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5764 	count = SPDK_COUNTOF(ids);
5765 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5766 	CU_ASSERT(rc == 0);
5767 	CU_ASSERT(count == 1);
5768 	CU_ASSERT(ids[0] == cloneid2);
5769 
5770 	/* clone */
5771 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5772 	count = SPDK_COUNTOF(ids);
5773 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5774 	CU_ASSERT(rc == 0);
5775 	CU_ASSERT(count == 0);
5776 
5777 	/* snapshot3 */
5778 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5779 	count = SPDK_COUNTOF(ids);
5780 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5781 	CU_ASSERT(rc == 0);
5782 	CU_ASSERT(count == 1);
5783 	CU_ASSERT(ids[0] == cloneid);
5784 
5785 	/* clone2 */
5786 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5787 	count = SPDK_COUNTOF(ids);
5788 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5789 	CU_ASSERT(rc == 0);
5790 	CU_ASSERT(count == 0);
5791 
5792 	/* Try to delete all blobs in the worst possible order */
5793 
5794 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5795 	poll_threads();
5796 	CU_ASSERT(g_bserrno != 0);
5797 
5798 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5799 	poll_threads();
5800 	CU_ASSERT(g_bserrno == 0);
5801 
5802 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5803 	poll_threads();
5804 	CU_ASSERT(g_bserrno != 0);
5805 
5806 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5807 	poll_threads();
5808 	CU_ASSERT(g_bserrno == 0);
5809 
5810 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5811 	poll_threads();
5812 	CU_ASSERT(g_bserrno == 0);
5813 
5814 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5815 	poll_threads();
5816 	CU_ASSERT(g_bserrno == 0);
5817 
5818 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5819 	poll_threads();
5820 	CU_ASSERT(g_bserrno == 0);
5821 
5822 	spdk_bs_unload(bs, bs_op_complete, NULL);
5823 	poll_threads();
5824 	CU_ASSERT(g_bserrno == 0);
5825 
5826 	g_bs = NULL;
5827 }
5828 
5829 /**
5830  * Snapshot-clones relation test 3
5831  *
5832  *         snapshot0
5833  *            |
5834  *         snapshot1
5835  *            |
5836  *         snapshot2
5837  *            |
5838  *           blob
5839  */
5840 static void
5841 blob_relations3(void)
5842 {
5843 	struct spdk_blob_store *bs;
5844 	struct spdk_bs_dev *dev;
5845 	struct spdk_io_channel *channel;
5846 	struct spdk_bs_opts bs_opts;
5847 	struct spdk_blob_opts opts;
5848 	struct spdk_blob *blob;
5849 	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;
5850 
5851 	dev = init_dev();
5852 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5853 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5854 
5855 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5856 	poll_threads();
5857 	CU_ASSERT(g_bserrno == 0);
5858 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5859 	bs = g_bs;
5860 
5861 	channel = spdk_bs_alloc_io_channel(bs);
5862 	SPDK_CU_ASSERT_FATAL(channel != NULL);
5863 
5864 	/* 1. Create blob with 10 clusters */
5865 	ut_spdk_blob_opts_init(&opts);
5866 	opts.num_clusters = 10;
5867 
5868 	blob = ut_blob_create_and_open(bs, &opts);
5869 	blobid = spdk_blob_get_id(blob);
5870 
5871 	/* 2. Create snapshot0 */
5872 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5873 	poll_threads();
5874 	CU_ASSERT(g_bserrno == 0);
5875 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5876 	snapshotid0 = g_blobid;
5877 
5878 	/* 3. Create snapshot1 */
5879 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5880 	poll_threads();
5881 	CU_ASSERT(g_bserrno == 0);
5882 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5883 	snapshotid1 = g_blobid;
5884 
5885 	/* 4. Create snapshot2 */
5886 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5887 	poll_threads();
5888 	CU_ASSERT(g_bserrno == 0);
5889 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5890 	snapshotid2 = g_blobid;
5891 
5892 	/* 5. Decouple blob */
5893 	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5894 	poll_threads();
5895 	CU_ASSERT(g_bserrno == 0);
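	/* Decoupling re-parents blobid from snapshotid2 to snapshotid1, leaving
	 * snapshotid2 with no clones (which is what allows deleting it in step 8). */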
5896 
5897 	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
5898 	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
5899 	poll_threads();
5900 	CU_ASSERT(g_bserrno == 0);
5901 
5902 	/* 7. Delete blob */
5903 	spdk_blob_close(blob, blob_op_complete, NULL);
5904 	poll_threads();
5905 	CU_ASSERT(g_bserrno == 0);
5906 
5907 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5908 	poll_threads();
5909 	CU_ASSERT(g_bserrno == 0);
5910 
5911 	/* 8. Delete snapshot2.
5912 	 * If md of snapshot 2 was updated, it should be possible to delete it */
5913 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5914 	poll_threads();
5915 	CU_ASSERT(g_bserrno == 0);
5916 
5917 	/* Remove remaining blobs and unload bs */
5918 	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
5919 	poll_threads();
5920 	CU_ASSERT(g_bserrno == 0);
5921 
5922 	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
5923 	poll_threads();
5924 	CU_ASSERT(g_bserrno == 0);
5925 
5926 	spdk_bs_free_io_channel(channel);
5927 	poll_threads();
5928 
5929 	spdk_bs_unload(bs, bs_op_complete, NULL);
5930 	poll_threads();
5931 	CU_ASSERT(g_bserrno == 0);
5932 
5933 	g_bs = NULL;
5934 }
5935 
5936 static void
5937 blobstore_clean_power_failure(void)
5938 {
5939 	struct spdk_blob_store *bs;
5940 	struct spdk_blob *blob;
5941 	struct spdk_power_failure_thresholds thresholds = {};
5942 	bool clean = false;
5943 	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
5944 	struct spdk_bs_super_block super_copy = {};
5945 
5946 	thresholds.general_threshold = 1;
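	/* The loop below sweeps all possible failure points: each pass allows one more
	 * device operation before the simulated power failure (general_threshold is
	 * bumped at the end of every iteration) until the md sync finally succeeds. */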
5947 	while (!clean) {
5948 		/* Create bs and blob */
5949 		suite_blob_setup();
5950 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5951 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5952 		bs = g_bs;
5953 		blob = g_blob;
5954 
5955 		/* The super block should not change for the rest of the UT;
5956 		 * save it and compare later. */
5957 		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
5958 		SPDK_CU_ASSERT_FATAL(super->clean == 0);
5959 		SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5960 
5961 		/* Force the bs/super block into a clean state,
5962 		 * and mark the blob dirty to force a blob persist. */
5963 		blob->state = SPDK_BLOB_STATE_DIRTY;
5964 		bs->clean = 1;
5965 		super->clean = 1;
5966 		super->crc = blob_md_page_calc_crc(super);
5967 
5968 		g_bserrno = -1;
5969 		dev_set_power_failure_thresholds(thresholds);
5970 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
5971 		poll_threads();
5972 		dev_reset_power_failure_event();
5973 
5974 		if (g_bserrno == 0) {
5975 			/* After successful md sync, both bs and super block
5976 			 * should be marked as not clean. */
5977 			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5978 			SPDK_CU_ASSERT_FATAL(super->clean == 0);
5979 			clean = true;
5980 		}
5981 
5982 		/* Depending on the point of failure, super block was either updated or not. */
5983 		super_copy.clean = super->clean;
5984 		super_copy.crc = blob_md_page_calc_crc(&super_copy);
5985 		/* Compare that the values in super block remained unchanged. */
5986 		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));
5987 
5988 		/* Delete blob and unload bs */
5989 		suite_blob_cleanup();
5990 
5991 		thresholds.general_threshold++;
5992 	}
5993 }
5994 
5995 static void
5996 blob_delete_snapshot_power_failure(void)
5997 {
5998 	struct spdk_bs_dev *dev;
5999 	struct spdk_blob_store *bs;
6000 	struct spdk_blob_opts opts;
6001 	struct spdk_blob *blob, *snapshot;
6002 	struct spdk_power_failure_thresholds thresholds = {};
6003 	spdk_blob_id blobid, snapshotid;
6004 	const void *value;
6005 	size_t value_len;
6006 	size_t count;
6007 	spdk_blob_id ids[3] = {};
6008 	int rc;
6009 	bool deleted = false;
6010 	int delete_snapshot_bserrno = -1;
6011 
6012 	thresholds.general_threshold = 1;
6013 	while (!deleted) {
6014 		dev = init_dev();
6015 
6016 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
6017 		poll_threads();
6018 		CU_ASSERT(g_bserrno == 0);
6019 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6020 		bs = g_bs;
6021 
6022 		/* Create blob */
6023 		ut_spdk_blob_opts_init(&opts);
6024 		opts.num_clusters = 10;
6025 
6026 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
6027 		poll_threads();
6028 		CU_ASSERT(g_bserrno == 0);
6029 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6030 		blobid = g_blobid;
6031 
6032 		/* Create snapshot */
6033 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6034 		poll_threads();
6035 		CU_ASSERT(g_bserrno == 0);
6036 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6037 		snapshotid = g_blobid;
6038 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6039 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6040 
6041 		dev_set_power_failure_thresholds(thresholds);
6042 
6043 		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6044 		poll_threads();
6045 		delete_snapshot_bserrno = g_bserrno;
6046 
6047 		/* Do not shut down cleanly. The assumption is that once snapshot deletion
6048 		 * reports success, changes to both blobs are already persisted. */
6049 		dev_reset_power_failure_event();
6050 		ut_bs_dirty_load(&bs, NULL);
6051 
6052 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6053 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6054 
6055 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6056 		poll_threads();
6057 		CU_ASSERT(g_bserrno == 0);
6058 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6059 		blob = g_blob;
6060 		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
6061 
6062 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6063 		poll_threads();
6064 
6065 		if (g_bserrno == 0) {
6066 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6067 			snapshot = g_blob;
6068 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6069 			count = SPDK_COUNTOF(ids);
6070 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6071 			CU_ASSERT(rc == 0);
6072 			CU_ASSERT(count == 1);
6073 			CU_ASSERT(ids[0] == blobid);
6074 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
6075 			CU_ASSERT(rc != 0);
6076 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
6077 
6078 			spdk_blob_close(snapshot, blob_op_complete, NULL);
6079 			poll_threads();
6080 			CU_ASSERT(g_bserrno == 0);
6081 		} else {
6082 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
6083 			/* The snapshot might have been left in an unrecoverable state, so it does not open.
6084 			 * Yet the delete might still perform further changes to the clone after that.
6085 			 * This UT should keep testing until the snapshot is deleted and the delete call succeeds. */
6086 			if (delete_snapshot_bserrno == 0) {
6087 				deleted = true;
6088 			}
6089 		}
6090 
6091 		spdk_blob_close(blob, blob_op_complete, NULL);
6092 		poll_threads();
6093 		CU_ASSERT(g_bserrno == 0);
6094 
6095 		spdk_bs_unload(bs, bs_op_complete, NULL);
6096 		poll_threads();
6097 		CU_ASSERT(g_bserrno == 0);
6098 
6099 		thresholds.general_threshold++;
6100 	}
6101 }
6102 
6103 static void
6104 blob_create_snapshot_power_failure(void)
6105 {
6106 	struct spdk_blob_store *bs = g_bs;
6107 	struct spdk_bs_dev *dev;
6108 	struct spdk_blob_opts opts;
6109 	struct spdk_blob *blob, *snapshot;
6110 	struct spdk_power_failure_thresholds thresholds = {};
6111 	spdk_blob_id blobid, snapshotid;
6112 	const void *value;
6113 	size_t value_len;
6114 	size_t count;
6115 	spdk_blob_id ids[3] = {};
6116 	int rc;
6117 	bool created = false;
6118 	int create_snapshot_bserrno = -1;
6119 
6120 	thresholds.general_threshold = 1;
6121 	while (!created) {
6122 		dev = init_dev();
6123 
6124 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
6125 		poll_threads();
6126 		CU_ASSERT(g_bserrno == 0);
6127 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6128 		bs = g_bs;
6129 
6130 		/* Create blob */
6131 		ut_spdk_blob_opts_init(&opts);
6132 		opts.num_clusters = 10;
6133 
6134 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
6135 		poll_threads();
6136 		CU_ASSERT(g_bserrno == 0);
6137 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6138 		blobid = g_blobid;
6139 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6140 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6141 
6142 		dev_set_power_failure_thresholds(thresholds);
6143 
6144 		/* Create snapshot */
6145 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6146 		poll_threads();
6147 		create_snapshot_bserrno = g_bserrno;
6148 		snapshotid = g_blobid;
6149 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6150 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6151 
6152 		/* Do not shut down cleanly. The assumption is that once snapshot creation
6153 		 * reports success, both blobs are power-fail safe. */
6154 		dev_reset_power_failure_event();
6155 		ut_bs_dirty_load(&bs, NULL);
6156 
6157 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6158 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6159 
6160 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6161 		poll_threads();
6162 		CU_ASSERT(g_bserrno == 0);
6163 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6164 		blob = g_blob;
6165 
6166 		if (snapshotid != SPDK_BLOBID_INVALID) {
6167 			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6168 			poll_threads();
6169 		}
6170 
6171 		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
6172 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6173 			snapshot = g_blob;
6174 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
6175 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
6176 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6177 			count = SPDK_COUNTOF(ids);
6178 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6179 			CU_ASSERT(rc == 0);
6180 			CU_ASSERT(count == 1);
6181 			CU_ASSERT(ids[0] == blobid);
6182 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
6183 			CU_ASSERT(rc != 0);
6184 
6185 			spdk_blob_close(snapshot, blob_op_complete, NULL);
6186 			poll_threads();
6187 			CU_ASSERT(g_bserrno == 0);
6188 			if (create_snapshot_bserrno == 0) {
6189 				created = true;
6190 			}
6191 		} else {
6192 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
6193 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
6194 		}
6195 
6196 		spdk_blob_close(blob, blob_op_complete, NULL);
6197 		poll_threads();
6198 		CU_ASSERT(g_bserrno == 0);
6199 
6200 		spdk_bs_unload(bs, bs_op_complete, NULL);
6201 		poll_threads();
6202 		CU_ASSERT(g_bserrno == 0);
6203 
6204 		thresholds.general_threshold++;
6205 	}
6206 }
6207 
6208 static void
6209 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6210 {
6211 	uint8_t payload_ff[64 * 512];
6212 	uint8_t payload_aa[64 * 512];
6213 	uint8_t payload_00[64 * 512];
6214 	uint8_t *cluster0, *cluster1;
6215 
6216 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6217 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6218 	memset(payload_00, 0x00, sizeof(payload_00));
6219 
6220 	/* Try to perform I/O with io unit = 512 */
6221 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
6222 	poll_threads();
6223 	CU_ASSERT(g_bserrno == 0);
6224 
6225 	/* If thin provisioning is set, the cluster should be allocated now */
6226 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6227 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
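	/* blob->active.clusters[] holds the device offsets (in blocks) of the allocated
	 * clusters, so the written pattern can be verified directly in the RAM-backed
	 * test device buffer (g_dev_buffer). */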
6228 
6229 	/* Each character 0-F symbolizes a single io_unit containing a 512-byte block filled with that character.
6230 	 * Pages are separated by |. A whole block [...] symbolizes one cluster (containing 4 pages). */
6231 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6232 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6233 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6234 
6235 	/* Verify write with offset on first page */
6236 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
6237 	poll_threads();
6238 	CU_ASSERT(g_bserrno == 0);
6239 
6240 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6241 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6242 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6243 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6244 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6245 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6246 
6247 	/* Verify write with offset on first page */
6248 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
6249 	poll_threads();
6250 
6251 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6252 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6253 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6254 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6255 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6256 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6257 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6258 
6259 	/* Verify write with offset on second page */
6260 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
6261 	poll_threads();
6262 
6263 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6264 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6265 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6266 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6267 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6268 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6269 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6270 
6271 	/* Verify write across multiple pages */
6272 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
6273 	poll_threads();
6274 
6275 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6276 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6277 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6278 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6279 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6280 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6281 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6282 
6283 	/* Verify write across multiple clusters */
6284 	spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
6285 	poll_threads();
6286 
6287 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6288 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6289 
6290 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6291 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6292 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6293 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6294 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6295 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6296 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6297 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6298 
6299 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6300 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6301 
6302 	/* Verify write to second cluster */
6303 	spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
6304 	poll_threads();
6305 
6306 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6307 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6308 
6309 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6310 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6311 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6312 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6313 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6314 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6315 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6316 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6317 
6318 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6319 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6320 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6321 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6322 }
6323 
6324 static void
6325 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6326 {
6327 	uint8_t payload_read[64 * 512];
6328 	uint8_t payload_ff[64 * 512];
6329 	uint8_t payload_aa[64 * 512];
6330 	uint8_t payload_00[64 * 512];
6331 
6332 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6333 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6334 	memset(payload_00, 0x00, sizeof(payload_00));
6335 
6336 	/* Read only first io unit */
6337 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6338 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6339 	 * payload_read: F000 0000 | 0000 0000 ... */
6340 	memset(payload_read, 0x00, sizeof(payload_read));
6341 	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
6342 	poll_threads();
6343 	CU_ASSERT(g_bserrno == 0);
6344 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6345 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6346 
6347 	/* Read four io_units starting from offset = 2
6348 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6349 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6350 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6351 
6352 	memset(payload_read, 0x00, sizeof(payload_read));
6353 	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
6354 	poll_threads();
6355 	CU_ASSERT(g_bserrno == 0);
6356 
6357 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6358 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6359 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6360 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6361 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6362 
6363 	/* Read eight io_units across multiple pages
6364 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6365 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6366 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6367 	memset(payload_read, 0x00, sizeof(payload_read));
6368 	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
6369 	poll_threads();
6370 	CU_ASSERT(g_bserrno == 0);
6371 
6372 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6373 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6374 
6375 	/* Read eight io_units across multiple clusters
6376 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6377 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6378 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6379 	memset(payload_read, 0x00, sizeof(payload_read));
6380 	spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
6381 	poll_threads();
6382 	CU_ASSERT(g_bserrno == 0);
6383 
6384 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6385 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6386 
6387 	/* Read four io_units from second cluster
6388 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6389 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6390 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6391 	memset(payload_read, 0x00, sizeof(payload_read));
6392 	spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
6393 	poll_threads();
6394 	CU_ASSERT(g_bserrno == 0);
6395 
6396 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6397 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6398 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6399 
6400 	/* Read second cluster
6401 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6402 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6403 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6404 	memset(payload_read, 0x00, sizeof(payload_read));
6405 	spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
6406 	poll_threads();
6407 	CU_ASSERT(g_bserrno == 0);
6408 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6409 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6410 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6411 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6412 
6413 	/* Read whole two clusters
6414 	 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6415 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6416 	memset(payload_read, 0x00, sizeof(payload_read));
6417 	spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
6418 	poll_threads();
6419 	CU_ASSERT(g_bserrno == 0);
6420 
6421 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6422 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6423 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6424 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6425 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6426 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6427 
6428 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6429 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6430 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6431 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6432 }
6433 
6434 
6435 static void
6436 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6437 {
6438 	uint8_t payload_ff[64 * 512];
6439 	uint8_t payload_aa[64 * 512];
6440 	uint8_t payload_00[64 * 512];
6441 	uint8_t *cluster0, *cluster1;
6442 
6443 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6444 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6445 	memset(payload_00, 0x00, sizeof(payload_00));
6446 
6447 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6448 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6449 
6450 	/* Unmap */
6451 	spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
6452 	poll_threads();
6453 
6454 	CU_ASSERT(g_bserrno == 0);
6455 
6456 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
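	/* On the RAM-backed test device used here, unmapped ranges read back as zeroes,
	 * so the check mirrors the write_zeroes case below. */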
6457 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6458 }
6459 
6460 static void
6461 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6462 {
6463 	uint8_t payload_ff[64 * 512];
6464 	uint8_t payload_aa[64 * 512];
6465 	uint8_t payload_00[64 * 512];
6466 	uint8_t *cluster0, *cluster1;
6467 
6468 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6469 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6470 	memset(payload_00, 0x00, sizeof(payload_00));
6471 
6472 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6473 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6474 
6475 	/* Write zeroes */
6476 	spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
6477 	poll_threads();
6478 
6479 	CU_ASSERT(g_bserrno == 0);
6480 
6481 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6482 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6483 }
6484 
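/* Helper that issues a writev either through the _ext API (also verifying that the
 * supplied ext_io_opts are propagated down to the bs_dev) or through the plain API. */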
6485 static inline void
6486 test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
6487 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6488 		    spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6489 {
6490 	if (io_opts) {
6491 		g_dev_writev_ext_called = false;
6492 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6493 		spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL,
6494 					io_opts);
6495 	} else {
6496 		spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
6497 	}
6498 	poll_threads();
6499 	CU_ASSERT(g_bserrno == 0);
6500 	if (io_opts) {
6501 		CU_ASSERT(g_dev_writev_ext_called);
6502 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6503 	}
6504 }
6505 
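/*
 * Build up a known pattern across the first two clusters using writev (or writev_ext
 * when ext_api is set) and verify the device contents after each write.
 */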
6506 static void
6507 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6508 	       bool ext_api)
6509 {
6510 	uint8_t payload_ff[64 * 512];
6511 	uint8_t payload_aa[64 * 512];
6512 	uint8_t payload_00[64 * 512];
6513 	uint8_t *cluster0, *cluster1;
6514 	struct iovec iov[4];
6515 	struct spdk_blob_ext_io_opts ext_opts = {
6516 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6517 		.memory_domain_ctx = (void *)0xf00df00d,
6518 		.size = sizeof(struct spdk_blob_ext_io_opts),
6519 		.user_ctx = (void *)123,
6520 	};
6521 
6522 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6523 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6524 	memset(payload_00, 0x00, sizeof(payload_00));
6525 
6526 	/* Try to perform I/O with io unit = 512 */
6527 	iov[0].iov_base = payload_ff;
6528 	iov[0].iov_len = 1 * 512;
6529 
6530 	test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
6531 			    ext_api ? &ext_opts : NULL);
6532 
6533 	/* If thin provisioning is enabled, the cluster should be allocated now */
6534 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6535 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6536 
6537 	/* Each character 0-F symbolizes a single io_unit containing a 512-byte block filled with that character.
6538 	 * Pages are separated by |. The whole block [...] symbolizes one cluster (containing 4 pages). */
6539 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6540 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6541 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6542 
6543 	/* Verify write with offset on first page */
6544 	iov[0].iov_base = payload_ff;
6545 	iov[0].iov_len = 1 * 512;
6546 
6547 	test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
6548 			    ext_api ? &ext_opts : NULL);
6549 
6550 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6551 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6552 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6553 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6554 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6555 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6556 
6557 	/* Verify a 4 io_unit write at offset 4, still within the first page */
6558 	iov[0].iov_base = payload_ff;
6559 	iov[0].iov_len = 4 * 512;
6560 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
6561 	poll_threads();
6562 
6563 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6564 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6565 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6566 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6567 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6568 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6569 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6570 
6571 	/* Verify write with offset on second page */
6572 	iov[0].iov_base = payload_ff;
6573 	iov[0].iov_len = 4 * 512;
6574 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
6575 	poll_threads();
6576 
6577 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6578 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6579 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6580 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6581 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6582 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6583 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6584 
6585 	/* Verify write across multiple pages */
6586 	iov[0].iov_base = payload_aa;
6587 	iov[0].iov_len = 8 * 512;
6588 
6589 	test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
6590 			    ext_api ? &ext_opts : NULL);
6591 
6592 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6593 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6594 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6595 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6596 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6597 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6598 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6599 
6600 	/* Verify write across multiple clusters */
6601 
6602 	iov[0].iov_base = payload_ff;
6603 	iov[0].iov_len = 8 * 512;
6604 
6605 	test_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL,
6606 			    ext_api ? &ext_opts : NULL);
6607 
6608 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6609 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6610 
6611 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6612 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6613 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6614 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6615 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6616 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6617 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6618 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
6619 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6620 
6621 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6622 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6623 
6624 	/* Verify write to second cluster */
6625 
6626 	iov[0].iov_base = payload_ff;
6627 	iov[0].iov_len = 2 * 512;
6628 
6629 	test_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL,
6630 			    ext_api ? &ext_opts : NULL);
6631 
6632 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6633 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6634 
6635 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6636 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6637 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6638 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6639 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6640 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6641 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6642 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6643 
6644 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6645 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6646 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6647 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6648 }
6649 
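/*
 * Issue a readv through spdk_blob_io_readv_ext() when io_opts is provided, otherwise
 * through spdk_blob_io_readv(). Poll to completion and, for the ext path, verify that
 * the options were passed through to the bs_dev.
 */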
6650 static inline void
6651 test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
6652 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6653 		   spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6654 {
6655 	if (io_opts) {
6656 		g_dev_readv_ext_called = false;
6657 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6658 		spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL, io_opts);
6659 	} else {
6660 		spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
6661 	}
6662 	poll_threads();
6663 	CU_ASSERT(g_bserrno == 0);
6664 	if (io_opts) {
6665 		CU_ASSERT(g_dev_readv_ext_called);
6666 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6667 	}
6668 }
6669 
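/*
 * Read back the pattern left by test_iov_write() using various iovec layouts via readv
 * (or readv_ext when ext_api is set) and verify the returned data.
 */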
6670 static void
6671 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6672 	      bool ext_api)
6673 {
6674 	uint8_t payload_read[64 * 512];
6675 	uint8_t payload_ff[64 * 512];
6676 	uint8_t payload_aa[64 * 512];
6677 	uint8_t payload_00[64 * 512];
6678 	struct iovec iov[4];
6679 	struct spdk_blob_ext_io_opts ext_opts = {
6680 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6681 		.memory_domain_ctx = (void *)0xf00df00d,
6682 		.size = sizeof(struct spdk_blob_ext_io_opts),
6683 		.user_ctx = (void *)123,
6684 	};
6685 
6686 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6687 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6688 	memset(payload_00, 0x00, sizeof(payload_00));
6689 
6690 	/* Read only first io unit */
6691 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6692 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6693 	 * payload_read: F000 0000 | 0000 0000 ... */
6694 	memset(payload_read, 0x00, sizeof(payload_read));
6695 	iov[0].iov_base = payload_read;
6696 	iov[0].iov_len = 1 * 512;
6697 
6698 	test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6699 
6700 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6701 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6702 
6703 	/* Read four io_units starting from offset = 2
6704 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6705 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6706 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6707 
6708 	memset(payload_read, 0x00, sizeof(payload_read));
6709 	iov[0].iov_base = payload_read;
6710 	iov[0].iov_len = 4 * 512;
6711 
6712 	test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6713 
6714 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6715 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6716 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6717 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6718 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6719 
6720 	/* Read eight io_units across multiple pages
6721 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6722 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6723 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6724 	memset(payload_read, 0x00, sizeof(payload_read));
6725 	iov[0].iov_base = payload_read;
6726 	iov[0].iov_len = 4 * 512;
6727 	iov[1].iov_base = payload_read + 4 * 512;
6728 	iov[1].iov_len = 4 * 512;
6729 
6730 	test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6731 
6732 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6733 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6734 
6735 	/* Read eight io_units across multiple clusters
6736 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6737 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6738 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6739 	memset(payload_read, 0x00, sizeof(payload_read));
6740 	iov[0].iov_base = payload_read;
6741 	iov[0].iov_len = 2 * 512;
6742 	iov[1].iov_base = payload_read + 2 * 512;
6743 	iov[1].iov_len = 2 * 512;
6744 	iov[2].iov_base = payload_read + 4 * 512;
6745 	iov[2].iov_len = 2 * 512;
6746 	iov[3].iov_base = payload_read + 6 * 512;
6747 	iov[3].iov_len = 2 * 512;
6748 
6749 	test_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL,
6750 			   ext_api ? &ext_opts : NULL);
6751 
6752 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6753 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6754 
6755 	/* Read four io_units from second cluster
6756 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6757 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6758 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6759 	memset(payload_read, 0x00, sizeof(payload_read));
6760 	iov[0].iov_base = payload_read;
6761 	iov[0].iov_len = 1 * 512;
6762 	iov[1].iov_base = payload_read + 1 * 512;
6763 	iov[1].iov_len = 3 * 512;
6764 
6765 	test_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL,
6766 			   ext_api ? &ext_opts : NULL);
6767 
6768 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6769 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6770 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6771 
6772 	/* Read second cluster
6773 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6774 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6775 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6776 	memset(payload_read, 0x00, sizeof(payload_read));
6777 	iov[0].iov_base = payload_read;
6778 	iov[0].iov_len = 1 * 512;
6779 	iov[1].iov_base = payload_read + 1 * 512;
6780 	iov[1].iov_len = 2 * 512;
6781 	iov[2].iov_base = payload_read + 3 * 512;
6782 	iov[2].iov_len = 4 * 512;
6783 	iov[3].iov_base = payload_read + 7 * 512;
6784 	iov[3].iov_len = 25 * 512;
6785 
6786 	test_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL,
6787 			   ext_api ? &ext_opts : NULL);
6788 
6789 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6790 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6791 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6792 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6793 
6794 	/* Read whole two clusters
6795 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6796 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6797 	memset(payload_read, 0x00, sizeof(payload_read));
6798 	iov[0].iov_base = payload_read;
6799 	iov[0].iov_len = 1 * 512;
6800 	iov[1].iov_base = payload_read + 1 * 512;
6801 	iov[1].iov_len = 8 * 512;
6802 	iov[2].iov_base = payload_read + 9 * 512;
6803 	iov[2].iov_len = 16 * 512;
6804 	iov[3].iov_base = payload_read + 25 * 512;
6805 	iov[3].iov_len = 39 * 512;
6806 
6807 	test_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL,
6808 			   ext_api ? &ext_opts : NULL);
6809 
6810 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6811 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6812 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6813 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6814 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6815 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6816 
6817 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6818 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6819 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6820 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6821 }
6822 
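/*
 * Exercise reads, writes, write_zeroes, and unmap on a blobstore with a 512-byte io_unit,
 * covering thick- and thin-provisioned blobs as well as a snapshot, a clone, and an
 * inflated clone.
 */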
6823 static void
6824 blob_io_unit(void)
6825 {
6826 	struct spdk_bs_opts bsopts;
6827 	struct spdk_blob_opts opts;
6828 	struct spdk_blob_store *bs;
6829 	struct spdk_bs_dev *dev;
6830 	struct spdk_blob *blob, *snapshot, *clone;
6831 	spdk_blob_id blobid;
6832 	struct spdk_io_channel *channel;
6833 
6834 	/* Create dev with 512 bytes io unit size */
6835 
6836 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6837 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
6838 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6839 
6840 	/* Set up the backing dev with a 512-byte block size (io_unit) */
6841 	dev = init_dev();
6842 	dev->blocklen = 512;
6843 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6844 
6845 	/* Initialize a new blob store */
6846 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6847 	poll_threads();
6848 	CU_ASSERT(g_bserrno == 0);
6849 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6850 	bs = g_bs;
6851 
6852 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6853 	channel = spdk_bs_alloc_io_channel(bs);
6854 
6855 	/* Create thick provisioned blob */
6856 	ut_spdk_blob_opts_init(&opts);
6857 	opts.thin_provision = false;
6858 	opts.num_clusters = 32;
6859 
6860 	blob = ut_blob_create_and_open(bs, &opts);
6861 	blobid = spdk_blob_get_id(blob);
6862 
6863 	test_io_write(dev, blob, channel);
6864 	test_io_read(dev, blob, channel);
6865 	test_io_zeroes(dev, blob, channel);
6866 
6867 	test_iov_write(dev, blob, channel, false);
6868 	test_iov_read(dev, blob, channel, false);
6869 	test_io_zeroes(dev, blob, channel);
6870 
6871 	test_iov_write(dev, blob, channel, true);
6872 	test_iov_read(dev, blob, channel, true);
6873 
6874 	test_io_unmap(dev, blob, channel);
6875 
6876 	spdk_blob_close(blob, blob_op_complete, NULL);
6877 	poll_threads();
6878 	CU_ASSERT(g_bserrno == 0);
6879 	blob = NULL;
6880 	g_blob = NULL;
6881 
6882 	/* Create thin provisioned blob */
6883 
6884 	ut_spdk_blob_opts_init(&opts);
6885 	opts.thin_provision = true;
6886 	opts.num_clusters = 32;
6887 
6888 	blob = ut_blob_create_and_open(bs, &opts);
6889 	blobid = spdk_blob_get_id(blob);
6890 
6891 	test_io_write(dev, blob, channel);
6892 	test_io_read(dev, blob, channel);
6893 	test_io_zeroes(dev, blob, channel);
6894 
6895 	test_iov_write(dev, blob, channel, false);
6896 	test_iov_read(dev, blob, channel, false);
6897 	test_io_zeroes(dev, blob, channel);
6898 
6899 	test_iov_write(dev, blob, channel, true);
6900 	test_iov_read(dev, blob, channel, true);
6901 
6902 	/* Create snapshot */
6903 
6904 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6905 	poll_threads();
6906 	CU_ASSERT(g_bserrno == 0);
6907 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6908 	blobid = g_blobid;
6909 
6910 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6911 	poll_threads();
6912 	CU_ASSERT(g_bserrno == 0);
6913 	CU_ASSERT(g_blob != NULL);
6914 	snapshot = g_blob;
6915 
6916 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6917 	poll_threads();
6918 	CU_ASSERT(g_bserrno == 0);
6919 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6920 	blobid = g_blobid;
6921 
6922 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6923 	poll_threads();
6924 	CU_ASSERT(g_bserrno == 0);
6925 	CU_ASSERT(g_blob != NULL);
6926 	clone = g_blob;
6927 
6928 	test_io_read(dev, blob, channel);
6929 	test_io_read(dev, snapshot, channel);
6930 	test_io_read(dev, clone, channel);
6931 
6932 	test_iov_read(dev, blob, channel, false);
6933 	test_iov_read(dev, snapshot, channel, false);
6934 	test_iov_read(dev, clone, channel, false);
6935 
6936 	test_iov_read(dev, blob, channel, true);
6937 	test_iov_read(dev, snapshot, channel, true);
6938 	test_iov_read(dev, clone, channel, true);
6939 
6940 	/* Inflate clone */
6941 
6942 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6943 	poll_threads();
6944 
6945 	CU_ASSERT(g_bserrno == 0);
6946 
6947 	test_io_read(dev, clone, channel);
6948 
6949 	test_io_unmap(dev, clone, channel);
6950 
6951 	test_iov_write(dev, clone, channel, false);
6952 	test_iov_read(dev, clone, channel, false);
6953 	test_io_unmap(dev, clone, channel);
6954 
6955 	test_iov_write(dev, clone, channel, true);
6956 	test_iov_read(dev, clone, channel, true);
6957 
6958 	spdk_blob_close(blob, blob_op_complete, NULL);
6959 	spdk_blob_close(snapshot, blob_op_complete, NULL);
6960 	spdk_blob_close(clone, blob_op_complete, NULL);
6961 	poll_threads();
6962 	CU_ASSERT(g_bserrno == 0);
6963 	blob = NULL;
6964 	g_blob = NULL;
6965 
6966 	spdk_bs_free_io_channel(channel);
6967 	poll_threads();
6968 
6969 	/* Unload the blob store */
6970 	spdk_bs_unload(bs, bs_op_complete, NULL);
6971 	poll_threads();
6972 	CU_ASSERT(g_bserrno == 0);
6973 	g_bs = NULL;
6974 	g_blob = NULL;
6975 	g_blobid = 0;
6976 }
6977 
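/*
 * Verify that a blobstore whose super block predates the io_unit_size field (stored as 0)
 * falls back to an io_unit size of SPDK_BS_PAGE_SIZE when loaded.
 */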
6978 static void
6979 blob_io_unit_compatibility(void)
6980 {
6981 	struct spdk_bs_opts bsopts;
6982 	struct spdk_blob_store *bs;
6983 	struct spdk_bs_dev *dev;
6984 	struct spdk_bs_super_block *super;
6985 
6986 	/* Create dev with 512 bytes io unit size */
6987 
6988 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6989 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
6990 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6991 
6992 	/* Set up the backing dev with a 512-byte block size (io_unit) */
6993 	dev = init_dev();
6994 	dev->blocklen = 512;
6995 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6996 
6997 	/* Initialize a new blob store */
6998 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6999 	poll_threads();
7000 	CU_ASSERT(g_bserrno == 0);
7001 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
7002 	bs = g_bs;
7003 
7004 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
7005 
7006 	/* Unload the blob store */
7007 	spdk_bs_unload(bs, bs_op_complete, NULL);
7008 	poll_threads();
7009 	CU_ASSERT(g_bserrno == 0);
7010 
7011 	/* Modify super block to behave like older version.
7012 	 * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */
7013 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
7014 	super->io_unit_size = 0;
7015 	super->crc = blob_md_page_calc_crc(super);
7016 
7017 	dev = init_dev();
7018 	dev->blocklen = 512;
7019 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
7020 
7021 	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
7022 	poll_threads();
7023 	CU_ASSERT(g_bserrno == 0);
7024 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
7025 	bs = g_bs;
7026 
7027 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
7028 
7029 	/* Unload the blob store */
7030 	spdk_bs_unload(bs, bs_op_complete, NULL);
7031 	poll_threads();
7032 	CU_ASSERT(g_bserrno == 0);
7033 
7034 	g_bs = NULL;
7035 	g_blob = NULL;
7036 	g_blobid = 0;
7037 }
7038 
7039 static void
7040 first_sync_complete(void *cb_arg, int bserrno)
7041 {
7042 	struct spdk_blob *blob = cb_arg;
7043 	int rc;
7044 
7045 	CU_ASSERT(bserrno == 0);
7046 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
7047 	CU_ASSERT(rc == 0);
7048 	CU_ASSERT(g_bserrno == -1);
7049 
7050 	/* Keep g_bserrno at -1; only the
7051 	 * second sync completion should set it to 0. */
7052 }
7053 
7054 static void
7055 second_sync_complete(void *cb_arg, int bserrno)
7056 {
7057 	struct spdk_blob *blob = cb_arg;
7058 	const void *value;
7059 	size_t value_len;
7060 	int rc;
7061 
7062 	CU_ASSERT(bserrno == 0);
7063 
7064 	/* Verify that the first sync completion had a chance to execute */
7065 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
7066 	CU_ASSERT(rc == 0);
7067 	SPDK_CU_ASSERT_FATAL(value != NULL);
7068 	CU_ASSERT(value_len == strlen("second") + 1);
7069 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
7070 
7071 	CU_ASSERT(g_bserrno == -1);
7072 	g_bserrno = bserrno;
7073 }
7074 
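/*
 * Verify that deleting a blob while another locked operation (snapshot, inflate, clone,
 * resize) is in progress fails with -EBUSY, and that two back-to-back md syncs both
 * complete successfully.
 */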
7075 static void
7076 blob_simultaneous_operations(void)
7077 {
7078 	struct spdk_blob_store *bs = g_bs;
7079 	struct spdk_blob_opts opts;
7080 	struct spdk_blob *blob, *snapshot;
7081 	spdk_blob_id blobid, snapshotid;
7082 	struct spdk_io_channel *channel;
7083 	int rc;
7084 
7085 	channel = spdk_bs_alloc_io_channel(bs);
7086 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7087 
7088 	ut_spdk_blob_opts_init(&opts);
7089 	opts.num_clusters = 10;
7090 
7091 	blob = ut_blob_create_and_open(bs, &opts);
7092 	blobid = spdk_blob_get_id(blob);
7093 
7094 	/* Create a snapshot and try to remove the blob at the same time:
7095 	 * - snapshot should be created successfully
7096 	 * - delete operation should fail with -EBUSY */
7097 	CU_ASSERT(blob->locked_operation_in_progress == false);
7098 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7099 	CU_ASSERT(blob->locked_operation_in_progress == true);
7100 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7101 	CU_ASSERT(blob->locked_operation_in_progress == true);
7102 	/* Deletion failure */
7103 	CU_ASSERT(g_bserrno == -EBUSY);
7104 	poll_threads();
7105 	CU_ASSERT(blob->locked_operation_in_progress == false);
7106 	/* Snapshot creation success */
7107 	CU_ASSERT(g_bserrno == 0);
7108 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7109 
7110 	snapshotid = g_blobid;
7111 
7112 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7113 	poll_threads();
7114 	CU_ASSERT(g_bserrno == 0);
7115 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7116 	snapshot = g_blob;
7117 
7118 	/* Inflate the blob and try to remove it at the same time:
7119 	 * - blob should be inflated successfully
7120 	 * - delete operation should fail with -EBUSY */
7121 	CU_ASSERT(blob->locked_operation_in_progress == false);
7122 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
7123 	CU_ASSERT(blob->locked_operation_in_progress == true);
7124 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7125 	CU_ASSERT(blob->locked_operation_in_progress == true);
7126 	/* Deletion failure */
7127 	CU_ASSERT(g_bserrno == -EBUSY);
7128 	poll_threads();
7129 	CU_ASSERT(blob->locked_operation_in_progress == false);
7130 	/* Inflation success */
7131 	CU_ASSERT(g_bserrno == 0);
7132 
7133 	/* Clone the snapshot and try to remove the snapshot at the same time:
7134 	 * - snapshot should be cloned successfully
7135 	 * - delete operation should fail with -EBUSY */
7136 	CU_ASSERT(blob->locked_operation_in_progress == false);
7137 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
7138 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
7139 	/* Deletion failure */
7140 	CU_ASSERT(g_bserrno == -EBUSY);
7141 	poll_threads();
7142 	CU_ASSERT(blob->locked_operation_in_progress == false);
7143 	/* Clone created */
7144 	CU_ASSERT(g_bserrno == 0);
7145 
7146 	/* Resize the blob and try to remove it at the same time:
7147 	 * - blob should be resized successfully
7148 	 * - delete operation should fail with -EBUSY */
7149 	CU_ASSERT(blob->locked_operation_in_progress == false);
7150 	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
7151 	CU_ASSERT(blob->locked_operation_in_progress == true);
7152 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7153 	CU_ASSERT(blob->locked_operation_in_progress == true);
7154 	/* Deletion failure */
7155 	CU_ASSERT(g_bserrno == -EBUSY);
7156 	poll_threads();
7157 	CU_ASSERT(blob->locked_operation_in_progress == false);
7158 	/* Blob resized successfully */
7159 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
7160 	poll_threads();
7161 	CU_ASSERT(g_bserrno == 0);
7162 
7163 	/* Issue two consecutive blob syncs; neither should fail.
7164 	 * Force the sync to actually occur by marking the blob dirty each time.
7165 	 * Calling sync alone is not enough to complete the operation,
7166 	 * since disk I/O must be polled for it to finish. */
7167 	g_bserrno = -1;
7168 
7169 	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
7170 	CU_ASSERT(rc == 0);
7171 	spdk_blob_sync_md(blob, first_sync_complete, blob);
7172 	CU_ASSERT(g_bserrno == -1);
7173 
7174 	spdk_blob_sync_md(blob, second_sync_complete, blob);
7175 	CU_ASSERT(g_bserrno == -1);
7176 
7177 	poll_threads();
7178 	CU_ASSERT(g_bserrno == 0);
7179 
7180 	spdk_bs_free_io_channel(channel);
7181 	poll_threads();
7182 
7183 	ut_blob_close_and_delete(bs, snapshot);
7184 	ut_blob_close_and_delete(bs, blob);
7185 }
7186 
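/*
 * Race an md sync that adds a large xattr against one that removes it, interrupting the
 * first sync after a growing number of poller iterations, and verify that the xattr is
 * never persisted across a blobstore reload.
 */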
7187 static void
7188 blob_persist_test(void)
7189 {
7190 	struct spdk_blob_store *bs = g_bs;
7191 	struct spdk_blob_opts opts;
7192 	struct spdk_blob *blob;
7193 	spdk_blob_id blobid;
7194 	struct spdk_io_channel *channel;
7195 	char *xattr;
7196 	size_t xattr_length;
7197 	int rc;
7198 	uint32_t page_count_clear, page_count_xattr;
7199 	uint64_t poller_iterations;
7200 	bool run_poller;
7201 
7202 	channel = spdk_bs_alloc_io_channel(bs);
7203 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7204 
7205 	ut_spdk_blob_opts_init(&opts);
7206 	opts.num_clusters = 10;
7207 
7208 	blob = ut_blob_create_and_open(bs, &opts);
7209 	blobid = spdk_blob_get_id(blob);
7210 
7211 	/* Save the number of md pages used after creation of the blob.
7212 	 * The count should return to this value after the xattr is removed. */
7213 	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
7214 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
7215 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
7216 
7217 	/* Add an xattr with the maximum descriptor length so that the metadata exceeds a single page. */
7218 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
7219 		       strlen("large_xattr");
7220 	xattr = calloc(xattr_length, sizeof(char));
7221 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
7222 
7223 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
7224 	SPDK_CU_ASSERT_FATAL(rc == 0);
7225 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
7226 	poll_threads();
7227 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
7228 
7229 	/* Save the number of md pages used after adding the large xattr */
7230 	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
7231 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7232 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7233 
7234 	/* Add an xattr to the blob and sync it. While the sync is in progress, remove the xattr and sync again.
7235 	 * Interrupt the first sync after an increasing number of poller iterations until it eventually completes.
7236 	 * The expectation is that after the second sync completes, no xattr is persisted in the metadata. */
7237 	poller_iterations = 1;
7238 	run_poller = true;
7239 	while (run_poller) {
7240 		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
7241 		SPDK_CU_ASSERT_FATAL(rc == 0);
7242 		g_bserrno = -1;
7243 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
7244 		poll_thread_times(0, poller_iterations);
7245 		if (g_bserrno == 0) {
7246 			/* Poller iteration count was high enough for first sync to complete.
7247 			 * Verify that blob takes up enough of md_pages to store the xattr. */
7248 			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7249 			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7250 			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
7251 			run_poller = false;
7252 		}
7253 		rc = spdk_blob_remove_xattr(blob, "large_xattr");
7254 		SPDK_CU_ASSERT_FATAL(rc == 0);
7255 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
7256 		poll_threads();
7257 		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
7258 		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
7259 		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
7260 		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);
7261 
7262 		/* Reload bs and re-open blob to verify that xattr was not persisted. */
7263 		spdk_blob_close(blob, blob_op_complete, NULL);
7264 		poll_threads();
7265 		CU_ASSERT(g_bserrno == 0);
7266 
7267 		ut_bs_reload(&bs, NULL);
7268 
7269 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7270 		poll_threads();
7271 		CU_ASSERT(g_bserrno == 0);
7272 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7273 		blob = g_blob;
7274 
7275 		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
7276 		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);
7277 
7278 		poller_iterations++;
7279 		/* Stop at a high iteration count to prevent an infinite loop.
7280 		 * This value should be enough for the first md sync to complete in any case. */
7281 		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
7282 	}
7283 
7284 	free(xattr);
7285 
7286 	ut_blob_close_and_delete(bs, blob);
7287 
7288 	spdk_bs_free_io_channel(channel);
7289 	poll_threads();
7290 }
7291 
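/*
 * Create two chained snapshots, decouple the second one from its parent, and verify that
 * its clusters were copied rather than shared with the first snapshot.
 */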
7292 static void
7293 blob_decouple_snapshot(void)
7294 {
7295 	struct spdk_blob_store *bs = g_bs;
7296 	struct spdk_blob_opts opts;
7297 	struct spdk_blob *blob, *snapshot1, *snapshot2;
7298 	struct spdk_io_channel *channel;
7299 	spdk_blob_id blobid, snapshotid;
7300 	uint64_t cluster;
7301 
7302 	for (int delete_snapshot_first = 0; delete_snapshot_first <= 1; delete_snapshot_first++) {
7303 		channel = spdk_bs_alloc_io_channel(bs);
7304 		SPDK_CU_ASSERT_FATAL(channel != NULL);
7305 
7306 		ut_spdk_blob_opts_init(&opts);
7307 		opts.num_clusters = 10;
7308 		opts.thin_provision = false;
7309 
7310 		blob = ut_blob_create_and_open(bs, &opts);
7311 		blobid = spdk_blob_get_id(blob);
7312 
7313 		/* Create first snapshot */
7314 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
7315 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7316 		poll_threads();
7317 		CU_ASSERT(g_bserrno == 0);
7318 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7319 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7320 		snapshotid = g_blobid;
7321 
7322 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7323 		poll_threads();
7324 		CU_ASSERT(g_bserrno == 0);
7325 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7326 		snapshot1 = g_blob;
7327 
7328 		/* Create the second one */
7329 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7330 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7331 		poll_threads();
7332 		CU_ASSERT(g_bserrno == 0);
7333 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7334 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
7335 		snapshotid = g_blobid;
7336 
7337 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7338 		poll_threads();
7339 		CU_ASSERT(g_bserrno == 0);
7340 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7341 		snapshot2 = g_blob;
7342 		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);
7343 
7344 		/* Now decouple the second snapshot, forcing it to copy the written clusters */
7345 		spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
7346 		poll_threads();
7347 		CU_ASSERT(g_bserrno == 0);
7348 
7349 		/* Verify that the snapshot has been decoupled and that the clusters have been copied */
7350 		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
7351 		for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
7352 			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
7353 			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
7354 					    snapshot1->active.clusters[cluster]);
7355 		}
7356 
7357 		spdk_bs_free_io_channel(channel);
7358 
7359 		if (delete_snapshot_first) {
7360 			ut_blob_close_and_delete(bs, snapshot2);
7361 			ut_blob_close_and_delete(bs, snapshot1);
7362 			ut_blob_close_and_delete(bs, blob);
7363 		} else {
7364 			ut_blob_close_and_delete(bs, blob);
7365 			ut_blob_close_and_delete(bs, snapshot2);
7366 			ut_blob_close_and_delete(bs, snapshot1);
7367 		}
7368 		poll_threads();
7369 	}
7370 }
7371 
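/*
 * Verify spdk_blob_get_next_allocated_io_unit() and spdk_blob_get_next_unallocated_io_unit()
 * on a thin-provisioned blob whose clusters are allocated by writes at scattered offsets.
 */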
7372 static void
7373 blob_seek_io_unit(void)
7374 {
7375 	struct spdk_blob_store *bs = g_bs;
7376 	struct spdk_blob *blob;
7377 	struct spdk_io_channel *channel;
7378 	struct spdk_blob_opts opts;
7379 	uint64_t free_clusters;
7380 	uint8_t payload[10 * 4096];
7381 	uint64_t offset;
7382 	uint64_t io_unit, io_units_per_cluster;
7383 
7384 	free_clusters = spdk_bs_free_cluster_count(bs);
7385 
7386 	channel = spdk_bs_alloc_io_channel(bs);
7387 	CU_ASSERT(channel != NULL);
7388 
7389 	/* Set blob as thin provisioned */
7390 	ut_spdk_blob_opts_init(&opts);
7391 	opts.thin_provision = true;
7392 
7393 	/* Create a blob */
7394 	blob = ut_blob_create_and_open(bs, &opts);
7395 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
7396 
7397 	io_units_per_cluster = bs_io_units_per_cluster(blob);
7398 
7399 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
7400 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
7401 	poll_threads();
7402 	CU_ASSERT(g_bserrno == 0);
7403 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
7404 	CU_ASSERT(blob->active.num_clusters == 5);
7405 
7406 	/* Write at the beginning of first cluster */
7407 	offset = 0;
7408 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
7409 	poll_threads();
7410 	CU_ASSERT(g_bserrno == 0);
7411 
7412 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 0);
7413 	CU_ASSERT(io_unit == offset);
7414 
7415 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 0);
7416 	CU_ASSERT(io_unit == io_units_per_cluster);
7417 
7418 	/* Write in the middle of third cluster */
7419 	offset = 2 * io_units_per_cluster + io_units_per_cluster / 2;
7420 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
7421 	poll_threads();
7422 	CU_ASSERT(g_bserrno == 0);
7423 
7424 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, io_units_per_cluster);
7425 	CU_ASSERT(io_unit == 2 * io_units_per_cluster);
7426 
7427 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 2 * io_units_per_cluster);
7428 	CU_ASSERT(io_unit == 3 * io_units_per_cluster);
7429 
7430 	/* Write at the end of last cluster */
7431 	offset = 5 * io_units_per_cluster - 1;
7432 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
7433 	poll_threads();
7434 	CU_ASSERT(g_bserrno == 0);
7435 
7436 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 3 * io_units_per_cluster);
7437 	CU_ASSERT(io_unit == 4 * io_units_per_cluster);
7438 
7439 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 4 * io_units_per_cluster);
7440 	CU_ASSERT(io_unit == UINT64_MAX);
7441 
7442 	spdk_bs_free_io_channel(channel);
7443 	poll_threads();
7444 
7445 	ut_blob_close_and_delete(bs, blob);
7446 }
7447 
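/*
 * Create esnap clones with and without an explicit size, grow them, and verify that a
 * reloaded blobstore can open them only when esnap_bs_dev_create is set and that the
 * esnap_ctx values are passed to the create callback.
 */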
7448 static void
7449 blob_esnap_create(void)
7450 {
7451 	struct spdk_blob_store	*bs = g_bs;
7452 	struct spdk_bs_opts	bs_opts;
7453 	struct ut_esnap_opts	esnap_opts;
7454 	struct spdk_blob_opts	opts;
7455 	struct spdk_blob_open_opts open_opts;
7456 	struct spdk_blob	*blob;
7457 	uint32_t		cluster_sz, block_sz;
7458 	const uint32_t		esnap_num_clusters = 4;
7459 	uint64_t		esnap_num_blocks;
7460 	uint32_t		sz;
7461 	spdk_blob_id		blobid;
7462 	uint32_t		bs_ctx_count, blob_ctx_count;
7463 
7464 	cluster_sz = spdk_bs_get_cluster_size(bs);
7465 	block_sz = spdk_bs_get_io_unit_size(bs);
7466 	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
7467 
7468 	/* Create a normal blob and verify it is not an esnap clone. */
7469 	ut_spdk_blob_opts_init(&opts);
7470 	blob = ut_blob_create_and_open(bs, &opts);
7471 	CU_ASSERT(!spdk_blob_is_esnap_clone(blob));
7472 	ut_blob_close_and_delete(bs, blob);
7473 
7474 	/* Create an esnap clone blob then verify it is an esnap clone and has the right size */
7475 	ut_spdk_blob_opts_init(&opts);
7476 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
7477 	opts.esnap_id = &esnap_opts;
7478 	opts.esnap_id_len = sizeof(esnap_opts);
7479 	opts.num_clusters = esnap_num_clusters;
7480 	blob = ut_blob_create_and_open(bs, &opts);
7481 	SPDK_CU_ASSERT_FATAL(blob != NULL);
7482 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
7483 	SPDK_CU_ASSERT_FATAL(blob_is_esnap_clone(blob));
7484 	sz = spdk_blob_get_num_clusters(blob);
7485 	CU_ASSERT(sz == esnap_num_clusters);
7486 	ut_blob_close_and_delete(bs, blob);
7487 
7488 	/* Create an esnap clone without the size and verify it can be grown */
7489 	ut_spdk_blob_opts_init(&opts);
7490 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
7491 	opts.esnap_id = &esnap_opts;
7492 	opts.esnap_id_len = sizeof(esnap_opts);
7493 	blob = ut_blob_create_and_open(bs, &opts);
7494 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
7495 	sz = spdk_blob_get_num_clusters(blob);
7496 	CU_ASSERT(sz == 0);
7497 	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
7498 	poll_threads();
7499 	CU_ASSERT(g_bserrno == 0);
7500 	sz = spdk_blob_get_num_clusters(blob);
7501 	CU_ASSERT(sz == 1);
7502 	spdk_blob_resize(blob, esnap_num_clusters, blob_op_complete, NULL);
7503 	poll_threads();
7504 	CU_ASSERT(g_bserrno == 0);
7505 	sz = spdk_blob_get_num_clusters(blob);
7506 	CU_ASSERT(sz == esnap_num_clusters);
7507 	spdk_blob_resize(blob, esnap_num_clusters + 1, blob_op_complete, NULL);
7508 	poll_threads();
7509 	CU_ASSERT(g_bserrno == 0);
7510 	sz = spdk_blob_get_num_clusters(blob);
7511 	CU_ASSERT(sz == esnap_num_clusters + 1);
7512 
7513 	/* Reload the blobstore and be sure that the blob can be opened. */
7514 	blobid = spdk_blob_get_id(blob);
7515 	spdk_blob_close(blob, blob_op_complete, NULL);
7516 	poll_threads();
7517 	CU_ASSERT(g_bserrno == 0);
7518 	g_blob = NULL;
7519 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7520 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
7521 	ut_bs_reload(&bs, &bs_opts);
7522 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7523 	poll_threads();
7524 	CU_ASSERT(g_bserrno == 0);
7525 	CU_ASSERT(g_blob != NULL);
7526 	blob = g_blob;
7527 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
7528 	sz = spdk_blob_get_num_clusters(blob);
7529 	CU_ASSERT(sz == esnap_num_clusters + 1);
7530 
7531 	/* Reload the blobstore without esnap_bs_dev_create: should fail to open blob. */
7532 	spdk_blob_close(blob, blob_op_complete, NULL);
7533 	poll_threads();
7534 	CU_ASSERT(g_bserrno == 0);
7535 	g_blob = NULL;
7536 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7537 	ut_bs_reload(&bs, &bs_opts);
7538 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7539 	poll_threads();
7540 	CU_ASSERT(g_bserrno != 0);
7541 	CU_ASSERT(g_blob == NULL);
7542 
7543 	/* Reload the blobstore with ctx set and verify it is passed to the esnap create callback */
7544 	bs_ctx_count = 0;
7545 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7546 	bs_opts.esnap_bs_dev_create = ut_esnap_create_with_count;
7547 	bs_opts.esnap_ctx = &bs_ctx_count;
7548 	ut_bs_reload(&bs, &bs_opts);
7549 	/* Loading the blobstore triggers the esnap to be loaded */
7550 	CU_ASSERT(bs_ctx_count == 1);
7551 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7552 	poll_threads();
7553 	CU_ASSERT(g_bserrno == 0);
7554 	CU_ASSERT(g_blob != NULL);
7555 	/* Opening the blob also triggers the esnap to be loaded */
7556 	CU_ASSERT(bs_ctx_count == 2);
7557 	blob = g_blob;
7558 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
7559 	sz = spdk_blob_get_num_clusters(blob);
7560 	CU_ASSERT(sz == esnap_num_clusters + 1);
7561 	spdk_blob_close(blob, blob_op_complete, NULL);
7562 	poll_threads();
7563 	CU_ASSERT(g_bserrno == 0);
7564 	g_blob = NULL;
7565 	/* If open_opts.esnap_ctx is set it is passed to the esnap create callback */
7566 	blob_ctx_count = 0;
7567 	spdk_blob_open_opts_init(&open_opts, sizeof(open_opts));
7568 	open_opts.esnap_ctx = &blob_ctx_count;
7569 	spdk_bs_open_blob_ext(bs, blobid, &open_opts, blob_op_with_handle_complete, NULL);
7570 	poll_threads();
7571 	blob = g_blob;
7572 	CU_ASSERT(bs_ctx_count == 3);
7573 	CU_ASSERT(blob_ctx_count == 1);
7574 	spdk_blob_close(blob, blob_op_complete, NULL);
7575 	poll_threads();
7576 	CU_ASSERT(g_bserrno == 0);
7577 	g_blob = NULL;
7578 }
7579 
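/*
 * Read the given range of the blob in readsize-sized chunks using the specified read
 * variant ("read", "readv", or "readv_ext") and verify each chunk against the
 * deterministic pattern generated by the ut_esnap device.
 */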
7580 static bool
7581 blob_esnap_verify_contents(struct spdk_blob *blob, struct spdk_io_channel *ch,
7582 			   uint64_t offset, uint64_t size, uint32_t readsize, const char *how)
7583 {
7584 	const uint32_t	bs_blksz = blob->bs->io_unit_size;
7585 	const uint32_t	esnap_blksz = blob->back_bs_dev->blocklen;
7586 	const uint32_t	start_blk = offset / bs_blksz;
7587 	const uint32_t	num_blocks = spdk_max(size, readsize) / bs_blksz;
7588 	const uint32_t	blocks_per_read = spdk_min(size, readsize) / bs_blksz;
7589 	uint32_t	blob_block;
7590 	struct iovec	iov;
7591 	uint8_t		buf[spdk_min(size, readsize)];
7592 	bool		block_ok;
7593 
7594 	SPDK_CU_ASSERT_FATAL(offset % bs_blksz == 0);
7595 	SPDK_CU_ASSERT_FATAL(size % bs_blksz == 0);
7596 	SPDK_CU_ASSERT_FATAL(readsize % bs_blksz == 0);
7597 
7598 	memset(buf, 0, sizeof(buf));
7599 	iov.iov_base = buf;
7600 	iov.iov_len = readsize;
7601 	for (blob_block = start_blk; blob_block < num_blocks; blob_block += blocks_per_read) {
7602 		if (strcmp(how, "read") == 0) {
7603 			spdk_blob_io_read(blob, ch, buf, blob_block, blocks_per_read,
7604 					  bs_op_complete, NULL);
7605 		} else if (strcmp(how, "readv") == 0) {
7606 			spdk_blob_io_readv(blob, ch, &iov, 1, blob_block, blocks_per_read,
7607 					   bs_op_complete, NULL);
7608 		} else if (strcmp(how, "readv_ext") == 0) {
7609 			/*
7610 			 * This is currently pointless. NULL ext_opts leads to dev->readv(), not
7611 			 * dev->readv_ext().
7612 			 */
7613 			spdk_blob_io_readv_ext(blob, ch, &iov, 1, blob_block, blocks_per_read,
7614 					       bs_op_complete, NULL, NULL);
7615 		} else {
7616 			abort();
7617 		}
7618 		poll_threads();
7619 		CU_ASSERT(g_bserrno == 0);
7620 		if (g_bserrno != 0) {
7621 			return false;
7622 		}
7623 		block_ok = ut_esnap_content_is_correct(buf, blocks_per_read * bs_blksz, blob->id,
7624 						       blob_block * bs_blksz, esnap_blksz);
7625 		CU_ASSERT(block_ok);
7626 		if (!block_ok) {
7627 			return false;
7628 		}
7629 	}
7630 
7631 	return true;
7632 }
7633 
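/*
 * Create an esnap clone on a blobstore with the given block sizes and verify that reads
 * pass through to the esnap device, then overwrite one blob block at a time and recheck
 * the neighboring blocks.
 */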
7634 static void
7635 blob_esnap_io_size(uint32_t bs_blksz, uint32_t esnap_blksz)
7636 {
7637 	struct spdk_bs_dev	*dev;
7638 	struct spdk_blob_store	*bs;
7639 	struct spdk_bs_opts	bsopts;
7640 	struct spdk_blob_opts	opts;
7641 	struct ut_esnap_opts	esnap_opts;
7642 	struct spdk_blob	*blob;
7643 	const uint32_t		cluster_sz = 16 * 1024;
7644 	const uint64_t		esnap_num_clusters = 4;
7645 	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
7646 	const uint64_t		esnap_num_blocks = esnap_sz / esnap_blksz;
7647 	const uint64_t		blob_num_blocks = esnap_sz / bs_blksz;
7648 	uint32_t		block;
7649 	struct spdk_io_channel	*bs_ch;
7650 
7651 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
7652 	bsopts.cluster_sz = cluster_sz;
7653 	bsopts.esnap_bs_dev_create = ut_esnap_create;
7654 
7655 	/* Create device with desired block size */
7656 	dev = init_dev();
7657 	dev->blocklen = bs_blksz;
7658 	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
7659 
7660 	/* Initialize a new blob store */
7661 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
7662 	poll_threads();
7663 	CU_ASSERT(g_bserrno == 0);
7664 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
7665 	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
7666 	bs = g_bs;
7667 
7668 	bs_ch = spdk_bs_alloc_io_channel(bs);
7669 	SPDK_CU_ASSERT_FATAL(bs_ch != NULL);
7670 
7671 	/* Create and open the esnap clone  */
7672 	ut_spdk_blob_opts_init(&opts);
7673 	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
7674 	opts.esnap_id = &esnap_opts;
7675 	opts.esnap_id_len = sizeof(esnap_opts);
7676 	opts.num_clusters = esnap_num_clusters;
7677 	blob = ut_blob_create_and_open(bs, &opts);
7678 	SPDK_CU_ASSERT_FATAL(blob != NULL);
7679 
7680 	/* Verify that large reads return the content of the esnap device */
7681 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "read"));
7682 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv"));
7683 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv_ext"));
7684 	/* Verify that small reads return the content of the esnap device */
7685 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "read"));
7686 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv"));
7687 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv_ext"));
7688 
7689 	/* Write one blob block at a time; verify that the surrounding blocks are OK */
7690 	for (block = 0; block < blob_num_blocks; block++) {
7691 		char		buf[bs_blksz];
7692 		union ut_word	word;
7693 
7694 		word.f.blob_id = 0xfedcba90;
7695 		word.f.lba = block;
7696 		ut_memset8(buf, word.num, bs_blksz);
7697 
7698 		spdk_blob_io_write(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
7699 		poll_threads();
7700 		CU_ASSERT(g_bserrno == 0);
7701 		if (g_bserrno != 0) {
7702 			break;
7703 		}
7704 
7705 		/* Read and verify the block before the current block */
7706 		if (block != 0) {
7707 			spdk_blob_io_read(blob, bs_ch, buf, block - 1, 1, bs_op_complete, NULL);
7708 			poll_threads();
7709 			CU_ASSERT(g_bserrno == 0);
7710 			if (g_bserrno != 0) {
7711 				break;
7712 			}
7713 			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
7714 							      (block - 1) * bs_blksz, bs_blksz));
7715 		}
7716 
7717 		/* Read and verify the current block */
7718 		spdk_blob_io_read(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
7719 		poll_threads();
7720 		CU_ASSERT(g_bserrno == 0);
7721 		if (g_bserrno != 0) {
7722 			break;
7723 		}
7724 		CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
7725 						      block * bs_blksz, bs_blksz));
7726 
7727 		/* Check the block that follows */
7728 		if (block + 1 < blob_num_blocks) {
7729 			g_bserrno = 0xbad;
7730 			spdk_blob_io_read(blob, bs_ch, buf, block + 1, 1, bs_op_complete, NULL);
7731 			poll_threads();
7732 			CU_ASSERT(g_bserrno == 0);
7733 			if (g_bserrno != 0) {
7734 				break;
7735 			}
7736 			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, blob->id,
7737 							      (block + 1) * bs_blksz,
7738 							      esnap_blksz));
7739 		}
7740 	}
7741 
7742 	/* Clean up */
7743 	spdk_bs_free_io_channel(bs_ch);
7744 	g_bserrno = 0xbad;
7745 	spdk_blob_close(blob, blob_op_complete, NULL);
7746 	poll_threads();
7747 	CU_ASSERT(g_bserrno == 0);
7748 	spdk_bs_unload(g_bs, bs_op_complete, NULL);
7749 	poll_threads();
7750 	CU_ASSERT(g_bserrno == 0);
7751 	g_bs = NULL;
7752 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7753 }
7754 
7755 static void
7756 blob_esnap_io_4096_4096(void)
7757 {
7758 	blob_esnap_io_size(4096, 4096);
7759 }
7760 
7761 static void
7762 blob_esnap_io_512_512(void)
7763 {
7764 	blob_esnap_io_size(512, 512);
7765 }
7766 
7767 static void
7768 blob_esnap_io_4096_512(void)
7769 {
7770 	blob_esnap_io_size(4096, 512);
7771 }
7772 
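/*
 * An esnap with a larger block size than the blobstore cannot be used: creating the clone
 * succeeds, but opening it must fail with -EINVAL.
 */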
7773 static void
7774 blob_esnap_io_512_4096(void)
7775 {
7776 	struct spdk_bs_dev	*dev;
7777 	struct spdk_blob_store	*bs;
7778 	struct spdk_bs_opts	bs_opts;
7779 	struct spdk_blob_opts	blob_opts;
7780 	struct ut_esnap_opts	esnap_opts;
7781 	uint64_t		cluster_sz = 16 * 1024;
7782 	uint32_t		bs_blksz = 512;
7783 	uint32_t		esnap_blksz = 4096;
7784 	uint64_t		esnap_num_blocks = 64;
7785 	spdk_blob_id		blobid;
7786 
7787 	/* Create device with desired block size */
7788 	dev = init_dev();
7789 	dev->blocklen = bs_blksz;
7790 	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
7791 
7792 	/* Initialize a new blob store */
7793 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7794 	bs_opts.cluster_sz = cluster_sz;
7795 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
7796 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
7797 	poll_threads();
7798 	CU_ASSERT(g_bserrno == 0);
7799 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
7800 	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
7801 	bs = g_bs;
7802 
7803 	/* Try to create and open the esnap clone. Create should succeed, open should fail. */
7804 	ut_spdk_blob_opts_init(&blob_opts);
7805 	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
7806 	blob_opts.esnap_id = &esnap_opts;
7807 	blob_opts.esnap_id_len = sizeof(esnap_opts);
7808 	blob_opts.num_clusters = esnap_num_blocks * esnap_blksz / bs_blksz;
7809 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
7810 	poll_threads();
7811 	CU_ASSERT(g_bserrno == 0);
7812 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7813 	blobid = g_blobid;
7814 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7815 	poll_threads();
7816 	CU_ASSERT(g_bserrno == -EINVAL);
7817 	CU_ASSERT(g_blob == NULL);
7818 
7819 	/* Clean up */
7820 	spdk_bs_unload(bs, bs_op_complete, NULL);
7821 	poll_threads();
7822 	CU_ASSERT(g_bserrno == 0);
7823 	g_bs = NULL;
7824 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7825 }
7826 
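/*
 * Verify that esnap device channels are created lazily per thread on the first read and
 * are released when the bs channel is freed or the blob is closed.
 */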
7827 static void
7828 blob_esnap_thread_add_remove(void)
7829 {
7830 	struct spdk_blob_store	*bs = g_bs;
7831 	struct spdk_blob_opts	opts;
7832 	struct ut_esnap_opts	ut_esnap_opts;
7833 	struct spdk_blob	*blob;
7834 	struct ut_esnap_dev	*ut_dev;
7835 	spdk_blob_id		blobid;
7836 	uint64_t		start_thread = g_ut_thread_id;
7837 	bool			destroyed = false;
7838 	struct spdk_io_channel	*ch0, *ch1;
7839 	struct ut_esnap_channel	*ut_ch0, *ut_ch1;
7840 	const uint32_t		blocklen = bs->io_unit_size;
7841 	char			buf[blocklen * 4];
7842 
7843 	SPDK_CU_ASSERT_FATAL(g_ut_num_threads > 1);
7844 	set_thread(0);
7845 
7846 	/* Create the esnap clone */
7847 	ut_esnap_opts_init(blocklen, 2048, "add_remove_1", &destroyed, &ut_esnap_opts);
7848 	ut_spdk_blob_opts_init(&opts);
7849 	opts.esnap_id = &ut_esnap_opts;
7850 	opts.esnap_id_len = sizeof(ut_esnap_opts);
7851 	opts.num_clusters = 10;
7852 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
7853 	poll_threads();
7854 	CU_ASSERT(g_bserrno == 0);
7855 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7856 	blobid = g_blobid;
7857 
7858 	/* Open the blob. No channels should be allocated yet. */
7859 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7860 	poll_threads();
7861 	CU_ASSERT(g_bserrno == 0);
7862 	CU_ASSERT(g_blob != NULL);
7863 	blob = g_blob;
7864 	ut_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
7865 	CU_ASSERT(ut_dev != NULL);
7866 	CU_ASSERT(ut_dev->num_channels == 0);
7867 
7868 	/* Create a bs channel on thread 0. The esnap channel is created lazily on the first read. */
7869 	ch0 = spdk_bs_alloc_io_channel(bs);
7870 	CU_ASSERT(ch0 != NULL);
7871 	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
7872 	CU_ASSERT(ut_ch0 == NULL);
7873 	CU_ASSERT(ut_dev->num_channels == 0);
7874 	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
7875 	poll_threads();
7876 	CU_ASSERT(g_bserrno == 0);
7877 	CU_ASSERT(ut_dev->num_channels == 1);
7878 	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
7879 	CU_ASSERT(ut_ch0 != NULL);
7880 	CU_ASSERT(ut_ch0->blocks_read == 1);
7881 
7882 	/* Create a channel on thread 1 and verify its lazy creation too. */
7883 	set_thread(1);
7884 	ch1 = spdk_bs_alloc_io_channel(bs);
7885 	CU_ASSERT(ch1 != NULL);
7886 	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
7887 	CU_ASSERT(ut_ch1 == NULL);
7888 	CU_ASSERT(ut_dev->num_channels == 1);
7889 	spdk_blob_io_read(blob, ch1, buf, 0, 4, bs_op_complete, NULL);
7890 	poll_threads();
7891 	CU_ASSERT(g_bserrno == 0);
7892 	CU_ASSERT(ut_dev->num_channels == 2);
7893 	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
7894 	CU_ASSERT(ut_ch1 != NULL);
7895 	CU_ASSERT(ut_ch1->blocks_read == 4);
7896 
7897 	/* Close the channel on thread 0 and verify the bs_dev channel is also gone. */
7898 	set_thread(0);
7899 	spdk_bs_free_io_channel(ch0);
7900 	poll_threads();
7901 	CU_ASSERT(ut_dev->num_channels == 1);
7902 
7903 	/* Close the blob. There is no outstanding IO so it should close right away. */
7904 	g_bserrno = 0xbad;
7905 	spdk_blob_close(blob, blob_op_complete, NULL);
7906 	poll_threads();
7907 	CU_ASSERT(g_bserrno == 0);
7908 	CU_ASSERT(destroyed);
7909 
7910 	/* The esnap channel for the blob should be gone now too. */
7911 	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
7912 	CU_ASSERT(ut_ch1 == NULL);
7913 
7914 	/* Clean up */
7915 	set_thread(1);
7916 	spdk_bs_free_io_channel(ch1);
7917 	set_thread(start_thread);
7918 }
7919 
7920 static void
7921 freeze_done(void *cb_arg, int bserrno)
7922 {
7923 	uint32_t *freeze_cnt = cb_arg;
7924 
7925 	CU_ASSERT(bserrno == 0);
7926 	(*freeze_cnt)++;
7927 }
7928 
7929 static void
7930 unfreeze_done(void *cb_arg, int bserrno)
7931 {
7932 	uint32_t *unfreeze_cnt = cb_arg;
7933 
7934 	CU_ASSERT(bserrno == 0);
7935 	(*unfreeze_cnt)++;
7936 }
7937 
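/*
 * Verify nested blob_freeze_io()/blob_unfreeze_io() calls: each callback fires only after
 * the threads are polled, and frozen_refcnt tracks the nesting depth.
 */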
static void
blob_nested_freezes(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel[2];
	struct spdk_blob_opts opts;
	uint32_t freeze_cnt, unfreeze_cnt;
	int i;

	for (i = 0; i < 2; i++) {
		set_thread(i);
		channel[i] = spdk_bs_alloc_io_channel(bs);
		SPDK_CU_ASSERT_FATAL(channel[i] != NULL);
	}

	set_thread(0);

	ut_spdk_blob_opts_init(&opts);
	blob = ut_blob_create_and_open(bs, &opts);

	/* First just test a single freeze/unfreeze. */
	freeze_cnt = 0;
	unfreeze_cnt = 0;
	CU_ASSERT(blob->frozen_refcnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(freeze_cnt == 0);
	poll_threads();
	CU_ASSERT(freeze_cnt == 1);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 0);
	CU_ASSERT(unfreeze_cnt == 0);
	poll_threads();
	CU_ASSERT(unfreeze_cnt == 1);

	/* Now nest multiple freeze/unfreeze operations.  We should
	 * expect a callback for each operation, but only after
	 * the threads have been polled to ensure a for_each_channel()
	 * was executed.
	 */
	freeze_cnt = 0;
	unfreeze_cnt = 0;
	CU_ASSERT(blob->frozen_refcnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(freeze_cnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 2);
	CU_ASSERT(freeze_cnt == 0);
	poll_threads();
	CU_ASSERT(freeze_cnt == 2);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(unfreeze_cnt == 0);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 0);
	CU_ASSERT(unfreeze_cnt == 0);
	poll_threads();
	CU_ASSERT(unfreeze_cnt == 2);

	for (i = 0; i < 2; i++) {
		set_thread(i);
		spdk_bs_free_io_channel(channel[i]);
	}
	set_thread(0);
	ut_blob_close_and_delete(bs, blob);

	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}

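/*
 * Regression test for issue #2932: creating a blob that consumes every free
 * cluster must succeed even when the blobstore has very few metadata pages.
 */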
static void
blob_ext_md_pages(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_bs_opts bs_opts;
	uint64_t free_clusters;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
	/* Issue #2932 was a bug in how we use bs_allocate_cluster() during resize.
	 * Reproducing it requires num_md_pages that is much smaller than the number
	 * of clusters.  Make sure we can create a blob that uses all of the free
	 * clusters.
	 */
	bs_opts.cluster_sz = 65536;
	bs_opts.num_md_pages = 16;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	free_clusters = spdk_bs_free_cluster_count(bs);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = free_clusters;

	blob = ut_blob_create_and_open(bs, &opts);
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}

static void
blob_esnap_clone_snapshot(void)
{
	/*
	 * When a snapshot is created, the blob that is being snapped becomes
	 * the leaf node (a clone of the snapshot) and the newly created
	 * snapshot sits between the snapped blob and the external snapshot.
	 *
	 * Before creating snap1
	 *
	 *   ,--------.     ,----------.
	 *   |  blob  |     |  vbdev   |
	 *   | blob1  |<----| nvme1n42 |
	 *   |  (rw)  |     |   (ro)   |
	 *   `--------'     `----------'
	 *       Figure 1
	 *
	 * After creating snap1
	 *
	 *   ,--------.     ,--------.     ,----------.
	 *   |  blob  |     |  blob  |     |  vbdev   |
	 *   | blob1  |<----| snap1  |<----| nvme1n42 |
	 *   |  (rw)  |     |  (ro)  |     |   (ro)   |
	 *   `--------'     `--------'     `----------'
	 *       Figure 2
	 *
	 * Starting from Figure 2, if snap1 is removed, the chain reverts to
	 * what it looks like in Figure 1.
	 *
	 * Starting from Figure 2, if blob1 is removed, the chain becomes:
	 *
	 *   ,--------.     ,----------.
	 *   |  blob  |     |  vbdev   |
	 *   | snap1  |<----| nvme1n42 |
	 *   |  (ro)  |     |   (ro)   |
	 *   `--------'     `----------'
	 *       Figure 3
	 *
	 * In each case, the blob pointed to by the nvme vbdev is considered
	 * the "esnap clone".  The esnap clone must have:
	 *
	 *   - XATTR_INTERNAL for BLOB_EXTERNAL_SNAPSHOT_ID (e.g. name or UUID)
	 *   - blob->invalid_flags must contain SPDK_BLOB_EXTERNAL_SNAPSHOT
	 *   - blob->parent_id must be SPDK_BLOBID_EXTERNAL_SNAPSHOT.
	 *
	 * No other blob that descends from the esnap clone may have any of
	 * those set.
	 */
	struct spdk_blob_store	*bs = g_bs;
	const uint32_t		blocklen = bs->io_unit_size;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob	*blob, *snap_blob;
	spdk_blob_id		blobid, snap_blobid;
	bool			destroyed = false;

	/* Create the esnap clone */
	ut_esnap_opts_init(blocklen, 2048, __func__, &destroyed, &esnap_opts);
	ut_spdk_blob_opts_init(&opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = 10;
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Open the blob. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Create a snapshot of the blob. The snapshot becomes the esnap clone.
	 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snap_blobid = g_blobid;

	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snap_blob = g_blob;

	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Delete the snapshot.  The original blob becomes the esnap clone.
	 */
	ut_blob_close_and_delete(bs, snap_blob);
	snap_blob = NULL;
	snap_blobid = SPDK_BLOBID_INVALID;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Create the snapshot again, then delete the original blob.  The
	 * snapshot should survive as the esnap clone.
	 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snap_blobid = g_blobid;

	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snap_blob = g_blob;

	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	ut_blob_close_and_delete(bs, blob);
	blob = NULL;
	blobid = SPDK_BLOBID_INVALID;
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Clone the snapshot.  The snapshot continues to be the esnap clone.
	 */
	spdk_bs_create_clone(bs, snap_blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Delete the snapshot. The clone becomes the esnap clone.
	 */
	ut_blob_close_and_delete(bs, snap_blob);
	snap_blob = NULL;
	snap_blobid = SPDK_BLOBID_INVALID;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Clean up
	 */
	ut_blob_close_and_delete(bs, blob);
}

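/* Create a fresh blobstore on a zeroed device before each test in the bs suites. */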
static void
suite_bs_setup(void)
{
	struct spdk_bs_dev *dev;

	dev = init_dev();
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_bs != NULL);
}

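/*
 * Like suite_bs_setup(), but register ut_esnap_create so that esnap clone
 * blobs can open their external snapshot devices.
 */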
static void
suite_esnap_bs_setup(void)
{
	struct spdk_bs_dev	*dev;
	struct spdk_bs_opts	bs_opts;

	dev = init_dev();
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.cluster_sz = 16 * 1024;
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
}

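/* Unload the blobstore, if a test left one loaded, and scrub the backing device. */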
static void
suite_bs_cleanup(void)
{
	if (g_bs != NULL) {
		spdk_bs_unload(g_bs, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		g_bs = NULL;
	}
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}

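/*
 * Create a blob with the given options (or defaults when blob_opts is NULL),
 * open it, and return the open handle.  The completion globals are reset so
 * the caller starts from a known state.
 */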
static struct spdk_blob *
ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
{
	struct spdk_blob *blob;
	struct spdk_blob_opts create_blob_opts;
	spdk_blob_id blobid;

	if (blob_opts == NULL) {
		ut_spdk_blob_opts_init(&create_blob_opts);
		blob_opts = &create_blob_opts;
	}

	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;
	g_blobid = -1;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	g_blob = NULL;
	g_bserrno = -1;

	return blob;
}

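/* Close an open blob and then delete it from the blobstore. */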
static void
ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
{
	spdk_blob_id blobid = spdk_blob_get_id(blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bserrno = -1;
}

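/*
 * For the blob_blob suite, set up a blobstore plus a single open blob that
 * each test can use via g_bs and g_blob.
 */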
static void
suite_blob_setup(void)
{
	suite_bs_setup();
	CU_ASSERT(g_bs != NULL);

	g_blob = ut_blob_create_and_open(g_bs, NULL);
	CU_ASSERT(g_blob != NULL);
}

static void
suite_blob_cleanup(void)
{
	ut_blob_close_and_delete(g_bs, g_blob);
	CU_ASSERT(g_blob == NULL);

	suite_bs_cleanup();
	CU_ASSERT(g_bs == NULL);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite, suite_bs, suite_blob, suite_esnap_bs;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("blob", NULL, NULL);
	suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL,
			suite_bs_setup, suite_bs_cleanup);
	suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL,
			suite_blob_setup, suite_blob_cleanup);
	suite_esnap_bs = CU_add_suite_with_setup_and_teardown("blob_esnap_bs", NULL, NULL,
			 suite_esnap_bs_setup,
			 suite_bs_cleanup);

	CU_ADD_TEST(suite, blob_init);
	CU_ADD_TEST(suite_bs, blob_open);
	CU_ADD_TEST(suite_bs, blob_create);
	CU_ADD_TEST(suite_bs, blob_create_loop);
	CU_ADD_TEST(suite_bs, blob_create_fail);
	CU_ADD_TEST(suite_bs, blob_create_internal);
	CU_ADD_TEST(suite_bs, blob_create_zero_extent);
	CU_ADD_TEST(suite, blob_thin_provision);
	CU_ADD_TEST(suite_bs, blob_snapshot);
	CU_ADD_TEST(suite_bs, blob_clone);
	CU_ADD_TEST(suite_bs, blob_inflate);
	CU_ADD_TEST(suite_bs, blob_delete);
	CU_ADD_TEST(suite_bs, blob_resize_test);
	CU_ADD_TEST(suite, blob_read_only);
	CU_ADD_TEST(suite_bs, channel_ops);
	CU_ADD_TEST(suite_bs, blob_super);
	CU_ADD_TEST(suite_blob, blob_write);
	CU_ADD_TEST(suite_blob, blob_read);
	CU_ADD_TEST(suite_blob, blob_rw_verify);
	CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
	CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
	CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
	CU_ADD_TEST(suite_bs, blob_unmap);
	CU_ADD_TEST(suite_bs, blob_iter);
	CU_ADD_TEST(suite_blob, blob_xattr);
	CU_ADD_TEST(suite_bs, blob_parse_md);
	CU_ADD_TEST(suite, bs_load);
	CU_ADD_TEST(suite_bs, bs_load_pending_removal);
	CU_ADD_TEST(suite, bs_load_custom_cluster_size);
	CU_ADD_TEST(suite, bs_load_after_failed_grow);
	CU_ADD_TEST(suite_bs, bs_unload);
	CU_ADD_TEST(suite, bs_cluster_sz);
	CU_ADD_TEST(suite_bs, bs_usable_clusters);
	CU_ADD_TEST(suite, bs_resize_md);
	CU_ADD_TEST(suite, bs_destroy);
	CU_ADD_TEST(suite, bs_type);
	CU_ADD_TEST(suite, bs_super_block);
	CU_ADD_TEST(suite, bs_test_recover_cluster_count);
	CU_ADD_TEST(suite, bs_test_grow);
	CU_ADD_TEST(suite, blob_serialize_test);
	CU_ADD_TEST(suite_bs, blob_crc);
	CU_ADD_TEST(suite, super_block_crc);
	CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
	CU_ADD_TEST(suite_bs, blob_flags);
	CU_ADD_TEST(suite_bs, bs_version);
	CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
	CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
	CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
	CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
	CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
	CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
	CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
	CU_ADD_TEST(suite, bs_load_iter_test);
	CU_ADD_TEST(suite_bs, blob_snapshot_rw);
	CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
	CU_ADD_TEST(suite, blob_relations);
	CU_ADD_TEST(suite, blob_relations2);
	CU_ADD_TEST(suite, blob_relations3);
	CU_ADD_TEST(suite, blobstore_clean_power_failure);
	CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
	CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
	CU_ADD_TEST(suite_bs, blob_inflate_rw);
	CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
	CU_ADD_TEST(suite_bs, blob_operation_split_rw);
	CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
	CU_ADD_TEST(suite, blob_io_unit);
	CU_ADD_TEST(suite, blob_io_unit_compatibility);
	CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
	CU_ADD_TEST(suite_bs, blob_persist_test);
	CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
	CU_ADD_TEST(suite_bs, blob_seek_io_unit);
	CU_ADD_TEST(suite_esnap_bs, blob_esnap_create);
	CU_ADD_TEST(suite_bs, blob_nested_freezes);
	CU_ADD_TEST(suite, blob_ext_md_pages);
	CU_ADD_TEST(suite, blob_esnap_io_4096_4096);
	CU_ADD_TEST(suite, blob_esnap_io_512_512);
	CU_ADD_TEST(suite, blob_esnap_io_4096_512);
	CU_ADD_TEST(suite, blob_esnap_io_512_4096);
	CU_ADD_TEST(suite_esnap_bs, blob_esnap_thread_add_remove);
	CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_snapshot);

	allocate_threads(2);
	set_thread(0);

	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);

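	/*
	 * Run the full registry four times: with bs_dev copy emulation disabled
	 * and then enabled, and within each, with the extent table disabled and
	 * then enabled.
	 */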
	g_dev_copy_enabled = false;
	CU_basic_set_mode(CU_BRM_VERBOSE);
	g_use_extent_table = false;
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	g_use_extent_table = true;
	CU_basic_run_tests();
	num_failures += CU_get_number_of_failures();

	g_dev_copy_enabled = true;
	CU_basic_set_mode(CU_BRM_VERBOSE);
	g_use_extent_table = false;
	CU_basic_run_tests();
	num_failures += CU_get_number_of_failures();
	g_use_extent_table = true;
	CU_basic_run_tests();
	num_failures += CU_get_number_of_failures();
	CU_cleanup_registry();

	free(g_dev_buffer);

	free_threads();

	return num_failures;
}