xref: /spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c (revision 1d83a09d4b8d745be754c660d8ec5961bd0a189e)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/ut_multithread.c"
37 #include "unit/lib/json_mock.c"
38 
39 #include "spdk/config.h"
40 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
41 #undef SPDK_CONFIG_VTUNE
42 
43 #include "bdev/bdev.c"
44 
/* Stub out the config-file API: the unit test never loads a config, so every
 * lookup reports "not found" (NULL section / NULL value / -1 int).
 */
DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

/* Tracing is compiled into bdev.c but irrelevant here; stub all entry points. */
struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name, const char *short_name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_is_ptr, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
/* Notification subsystem stubs - notifications are not exercised by these tests. */
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);


/* Shared scratch state used by individual test cases. */
int g_status;
int g_count;
struct spdk_histogram_data *g_histogram;
68 
/* Intentionally empty stub: bdev.c references this SCSI/NVMe sense-translation
 * helper, but no test in this file inspects the translated status codes.
 */
void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}
74 
/* CUnit suite init hook - nothing to set up, always succeeds. */
static int
null_init(void)
{
	return 0;
}
80 
/* CUnit suite cleanup hook - nothing to tear down, always succeeds. */
static int
null_clean(void)
{
	return 0;
}
86 
/* bdev destruct callback for the stub fn_table - no per-bdev context to free. */
static int
stub_destruct(void *ctx)
{
	return 0;
}
92 
/* Description of one I/O the test expects stub_submit_request() to receive.
 * Queued on bdev_ut_channel::expected_io before the test submits the I/O.
 */
struct ut_expected_io {
	uint8_t				type;	/* SPDK_BDEV_IO_TYPE_*; INVALID means "don't check type" */
	uint64_t			offset;	/* expected offset_blocks */
	uint64_t			length;	/* expected num_blocks; 0 means "don't check offset/length/iovs" */
	int				iovcnt;	/* expected iov count; 0 means "don't check iovs" */
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	TAILQ_ENTRY(ut_expected_io)	link;
};
101 
/* Per-channel context for the stub bdev module: tracks I/O that has been
 * submitted but not yet completed, plus the queue of expected I/O patterns.
 */
struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};
107 
static bool g_io_done;			/* set by io_done() when the user callback fires */
static struct spdk_bdev_io *g_bdev_io;	/* last I/O seen by stub_submit_request() */
static enum spdk_bdev_io_status g_io_status;	/* status captured by io_done() */
static uint32_t g_bdev_ut_io_device;	/* address used as the io_device handle */
static struct bdev_ut_channel *g_bdev_ut_channel;	/* singleton channel context */
113 
114 static struct ut_expected_io *
115 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
116 {
117 	struct ut_expected_io *expected_io;
118 
119 	expected_io = calloc(1, sizeof(*expected_io));
120 	SPDK_CU_ASSERT_FATAL(expected_io != NULL);
121 
122 	expected_io->type = type;
123 	expected_io->offset = offset;
124 	expected_io->length = length;
125 	expected_io->iovcnt = iovcnt;
126 
127 	return expected_io;
128 }
129 
130 static void
131 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
132 {
133 	expected_io->iov[pos].iov_base = base;
134 	expected_io->iov[pos].iov_len = len;
135 }
136 
137 static void
138 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
139 {
140 	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
141 	struct ut_expected_io *expected_io;
142 	struct iovec *iov, *expected_iov;
143 	int i;
144 
145 	g_bdev_io = bdev_io;
146 
147 	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
148 	ch->outstanding_io_count++;
149 
150 	expected_io = TAILQ_FIRST(&ch->expected_io);
151 	if (expected_io == NULL) {
152 		return;
153 	}
154 	TAILQ_REMOVE(&ch->expected_io, expected_io, link);
155 
156 	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
157 		CU_ASSERT(bdev_io->type == expected_io->type);
158 	}
159 
160 	if (expected_io->length == 0) {
161 		free(expected_io);
162 		return;
163 	}
164 
165 	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
166 	CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks);
167 
168 	if (expected_io->iovcnt == 0) {
169 		free(expected_io);
170 		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
171 		return;
172 	}
173 
174 	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
175 	for (i = 0; i < expected_io->iovcnt; i++) {
176 		iov = &bdev_io->u.bdev.iovs[i];
177 		expected_iov = &expected_io->iov[i];
178 		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
179 		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
180 	}
181 
182 	free(expected_io);
183 }
184 
/* get_buf callback: the buffer allocation must succeed, then the I/O flows
 * through the normal stub submission path.
 */
static void
stub_submit_request_aligned_buffer_cb(struct spdk_io_channel *_ch,
				      struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}
193 
/* Alternate submit_request that first routes through spdk_bdev_io_get_buf()
 * to exercise the aligned-buffer allocation path.
 */
static void
stub_submit_request_aligned_buffer(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_aligned_buffer_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
200 
201 static uint32_t
202 stub_complete_io(uint32_t num_to_complete)
203 {
204 	struct bdev_ut_channel *ch = g_bdev_ut_channel;
205 	struct spdk_bdev_io *bdev_io;
206 	uint32_t num_completed = 0;
207 
208 	while (num_completed < num_to_complete) {
209 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
210 			break;
211 		}
212 		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
213 		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
214 		ch->outstanding_io_count--;
215 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
216 		num_completed++;
217 	}
218 
219 	return num_completed;
220 }
221 
/* get_io_channel callback - returns a channel for the shared ut io_device. */
static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}
227 
/* io_type_supported callback - the stub bdev claims to support every I/O type. */
static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return true;
}
233 
/* Function table shared by every bdev/vbdev created in these tests.
 * Individual tests may temporarily swap .submit_request (e.g. to the
 * aligned-buffer variant), which is why this is not declared const.
 */
static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};
240 
241 static int
242 bdev_ut_create_ch(void *io_device, void *ctx_buf)
243 {
244 	struct bdev_ut_channel *ch = ctx_buf;
245 
246 	CU_ASSERT(g_bdev_ut_channel == NULL);
247 	g_bdev_ut_channel = ch;
248 
249 	TAILQ_INIT(&ch->outstanding_io);
250 	ch->outstanding_io_count = 0;
251 	TAILQ_INIT(&ch->expected_io);
252 	return 0;
253 }
254 
/* io_device channel-destroy callback - clears the singleton channel pointer. */
static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}
261 
/* Module init: register the ut io_device so channels can be created. */
static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	return 0;
}
269 
/* Module teardown: unregister the ut io_device. */
static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}
275 
/* Module descriptor for the "physical" stub bdevs created by allocate_bdev(). */
struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
};
281 
282 static void vbdev_ut_examine(struct spdk_bdev *bdev);
283 
/* vbdev module init - nothing to do. */
static int
vbdev_ut_module_init(void)
{
	return 0;
}
289 
/* vbdev module teardown - nothing to do. */
static void
vbdev_ut_module_fini(void)
{
}
294 
/* Module descriptor for the virtual stub bdevs created by allocate_vbdev(). */
struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};
301 
302 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
303 SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
304 
/* examine_config callback - no claiming logic; immediately signal done so
 * bdev registration is not held up.
 */
static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}
310 
311 static struct spdk_bdev *
312 allocate_bdev(char *name)
313 {
314 	struct spdk_bdev *bdev;
315 	int rc;
316 
317 	bdev = calloc(1, sizeof(*bdev));
318 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
319 
320 	bdev->name = name;
321 	bdev->fn_table = &fn_table;
322 	bdev->module = &bdev_ut_if;
323 	bdev->blockcnt = 1024;
324 	bdev->blocklen = 512;
325 
326 	rc = spdk_bdev_register(bdev);
327 	CU_ASSERT(rc == 0);
328 
329 	return bdev;
330 }
331 
332 static struct spdk_bdev *
333 allocate_vbdev(char *name, struct spdk_bdev *base1, struct spdk_bdev *base2)
334 {
335 	struct spdk_bdev *bdev;
336 	struct spdk_bdev *array[2];
337 	int rc;
338 
339 	bdev = calloc(1, sizeof(*bdev));
340 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
341 
342 	bdev->name = name;
343 	bdev->fn_table = &fn_table;
344 	bdev->module = &vbdev_ut_if;
345 
346 	/* vbdev must have at least one base bdev */
347 	CU_ASSERT(base1 != NULL);
348 
349 	array[0] = base1;
350 	array[1] = base2;
351 
352 	rc = spdk_vbdev_register(bdev, array, base2 == NULL ? 1 : 2);
353 	CU_ASSERT(rc == 0);
354 
355 	return bdev;
356 }
357 
/* Unregister a bdev created by allocate_bdev(), drain the async unregister,
 * then poison the memory (0xFF) before freeing to catch use-after-free.
 */
static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}
366 
/* Release a vbdev created by allocate_vbdev().  The teardown sequence
 * (unregister, drain, poison, free) is identical to a base bdev's, so
 * delegate to free_bdev() instead of duplicating it.
 */
static void
free_vbdev(struct spdk_bdev *bdev)
{
	free_bdev(bdev);
}
375 
/* Completion callback for spdk_bdev_get_device_stat(): verifies the stat call
 * succeeded for "bdev0", then frees the stat buffer AND the bdev itself.
 * cb_arg points to the test's "done" flag.
 */
static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	free_bdev(bdev);

	*(bool *)cb_arg = true;
}
391 
392 static void
393 get_device_stat_test(void)
394 {
395 	struct spdk_bdev *bdev;
396 	struct spdk_bdev_io_stat *stat;
397 	bool done;
398 
399 	bdev = allocate_bdev("bdev0");
400 	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
401 	if (stat == NULL) {
402 		free_bdev(bdev);
403 		return;
404 	}
405 
406 	done = false;
407 	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
408 	while (!done) { poll_threads(); }
409 
410 
411 }
412 
/* Verify the open-for-write rules: a bdev that has been claimed by a module
 * may only be opened read-only; an unclaimed leaf bdev may be opened
 * read/write.
 */
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7). This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |           \  |  /         \
	 *      bdev0   bdev1          bdev2         bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4", bdev[0], bdev[1]);
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5", bdev[2], NULL);
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6", bdev[2], NULL);

	bdev[7] = allocate_vbdev("bdev7", bdev[2], bdev[3]);

	bdev[8] = allocate_vbdev("bdev8", bdev[4], bdev[5]);

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	spdk_bdev_close(desc[5]);

	/* Tear down top-down: vbdevs before their base bdevs. */
	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}
542 
/* Verify byte-to-block conversion: succeeds only when both offset and length
 * are exact multiples of the block length, for power-of-two and
 * non-power-of-two block sizes.
 */
static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen not the power of two */
	bdev.blocklen = 100;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}
578 
579 static void
580 num_blocks_test(void)
581 {
582 	struct spdk_bdev bdev;
583 	struct spdk_bdev_desc *desc = NULL;
584 	int rc;
585 
586 	memset(&bdev, 0, sizeof(bdev));
587 	bdev.name = "num_blocks";
588 	bdev.fn_table = &fn_table;
589 	bdev.module = &bdev_ut_if;
590 	spdk_bdev_register(&bdev);
591 	spdk_bdev_notify_blockcnt_change(&bdev, 50);
592 
593 	/* Growing block number */
594 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
595 	/* Shrinking block number */
596 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);
597 
598 	/* In case bdev opened */
599 	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
600 	CU_ASSERT(rc == 0);
601 	SPDK_CU_ASSERT_FATAL(desc != NULL);
602 
603 	/* Growing block number */
604 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
605 	/* Shrinking block number */
606 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);
607 
608 	spdk_bdev_close(desc);
609 	spdk_bdev_unregister(&bdev, NULL, NULL);
610 
611 	poll_threads();
612 }
613 
/* Verify I/O range validation against a 100-block bdev, including the
 * offset+length overflow case near UINT64_MAX.
 */
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}
639 
/* Verify alias management: duplicate/empty alias rejection, cross-bdev alias
 * uniqueness, deletion of existing and non-existing aliases, and del-all on
 * both empty and populated alias lists.
 */
static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Trying adding an alias identical to name.
	 * Alias is identical to name, so it can not be added to aliases list
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Trying to add empty alias,
	 * this one should fail
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Trying adding same alias to two different registered bdevs */

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying removing an alias from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Trying to del all alias from empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Trying to del all alias from non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
727 
/* Generic I/O completion callback: record completion and final status in the
 * test globals, then release the I/O.
 */
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}
735 
/* spdk_bdev_initialize() completion callback - init must succeed. */
static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}
741 
/* spdk_bdev_finish() completion callback - nothing to verify. */
static void
bdev_fini_cb(void *arg)
{
}
746 
/* Wrapper around spdk_bdev_io_wait_entry carrying everything io_wait_cb()
 * needs to resubmit the I/O, plus a flag the test polls.
 */
struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;	/* set once the retried read is accepted */
};
753 
/* io_wait callback: a bdev_io has become available, so the retried read must
 * now succeed.  Marks the entry as submitted.
 */
static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}
764 
/* Exhaust a deliberately tiny bdev_io pool (size 4), verify -ENOMEM on the
 * fifth submission, then confirm queued io_wait entries fire in FIFO order
 * as completions return bdev_ios to the pool.
 */
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Fill the entire pool with outstanding reads. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	/* Pool exhausted - the fifth read must fail with -ENOMEM. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	/* Each completion frees one bdev_io, unblocking one waiter (FIFO). */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
842 
/* Verify _spdk_bdev_io_should_split(): no split without an optimal_io_boundary,
 * never split RESETs, no split when the I/O ends exactly on a boundary, and
 * split when the I/O crosses one.
 */
static void
bdev_io_spans_boundary_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;

	memset(&bdev, 0, sizeof(bdev));

	bdev.optimal_io_boundary = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary set - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O run right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == true);
}
875 
876 static void
877 bdev_io_split(void)
878 {
879 	struct spdk_bdev *bdev;
880 	struct spdk_bdev_desc *desc = NULL;
881 	struct spdk_io_channel *io_ch;
882 	struct spdk_bdev_opts bdev_opts = {
883 		.bdev_io_pool_size = 512,
884 		.bdev_io_cache_size = 64,
885 	};
886 	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
887 	struct ut_expected_io *expected_io;
888 	uint64_t i;
889 	int rc;
890 
891 	rc = spdk_bdev_set_opts(&bdev_opts);
892 	CU_ASSERT(rc == 0);
893 	spdk_bdev_initialize(bdev_init_cb, NULL);
894 
895 	bdev = allocate_bdev("bdev0");
896 
897 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
898 	CU_ASSERT(rc == 0);
899 	SPDK_CU_ASSERT_FATAL(desc != NULL);
900 	io_ch = spdk_bdev_get_io_channel(desc);
901 	CU_ASSERT(io_ch != NULL);
902 
903 	bdev->optimal_io_boundary = 16;
904 	bdev->split_on_optimal_io_boundary = false;
905 
906 	g_io_done = false;
907 
908 	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
909 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
910 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
911 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
912 
913 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
914 	CU_ASSERT(rc == 0);
915 	CU_ASSERT(g_io_done == false);
916 
917 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
918 	stub_complete_io(1);
919 	CU_ASSERT(g_io_done == true);
920 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
921 
922 	bdev->split_on_optimal_io_boundary = true;
923 
924 	/* Now test that a single-vector command is split correctly.
925 	 * Offset 14, length 8, payload 0xF000
926 	 *  Child - Offset 14, length 2, payload 0xF000
927 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
928 	 *
929 	 * Set up the expected values before calling spdk_bdev_read_blocks
930 	 */
931 	g_io_done = false;
932 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
933 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
934 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
935 
936 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
937 	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
938 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
939 
940 	/* spdk_bdev_read_blocks will submit the first child immediately. */
941 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
942 	CU_ASSERT(rc == 0);
943 	CU_ASSERT(g_io_done == false);
944 
945 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
946 	stub_complete_io(2);
947 	CU_ASSERT(g_io_done == true);
948 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
949 
950 	/* Now set up a more complex, multi-vector command that needs to be split,
951 	 *  including splitting iovecs.
952 	 */
953 	iov[0].iov_base = (void *)0x10000;
954 	iov[0].iov_len = 512;
955 	iov[1].iov_base = (void *)0x20000;
956 	iov[1].iov_len = 20 * 512;
957 	iov[2].iov_base = (void *)0x30000;
958 	iov[2].iov_len = 11 * 512;
959 
960 	g_io_done = false;
961 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
962 	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
963 	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
964 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
965 
966 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
967 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
968 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
969 
970 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
971 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
972 	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
973 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
974 
975 	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
976 	CU_ASSERT(rc == 0);
977 	CU_ASSERT(g_io_done == false);
978 
979 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
980 	stub_complete_io(3);
981 	CU_ASSERT(g_io_done == true);
982 
983 	/* Test multi vector command that needs to be split by strip and then needs to be
984 	 * split further due to the capacity of child iovs.
985 	 */
986 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
987 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
988 		iov[i].iov_len = 512;
989 	}
990 
991 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
992 	g_io_done = false;
993 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
994 					   BDEV_IO_NUM_CHILD_IOV);
995 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
996 		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
997 	}
998 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
999 
1000 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
1001 					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
1002 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1003 		ut_expected_io_set_iov(expected_io, i,
1004 				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
1005 	}
1006 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1007 
1008 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1009 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1010 	CU_ASSERT(rc == 0);
1011 	CU_ASSERT(g_io_done == false);
1012 
1013 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1014 	stub_complete_io(1);
1015 	CU_ASSERT(g_io_done == false);
1016 
1017 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1018 	stub_complete_io(1);
1019 	CU_ASSERT(g_io_done == true);
1020 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1021 
1022 	/* Test multi vector command that needs to be split by strip and then needs to be
1023 	 * split further due to the capacity of child iovs, but fails to split. The cause
1024 	 * of failure of split is that the length of an iovec is not multiple of block size.
1025 	 */
1026 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1027 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1028 		iov[i].iov_len = 512;
1029 	}
1030 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
1031 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1032 
1033 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
1034 	g_io_done = false;
1035 	g_io_status = 0;
1036 
1037 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1038 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1039 	CU_ASSERT(rc == 0);
1040 	CU_ASSERT(g_io_done == true);
1041 	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1042 
1043 	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
1044 	 * split, so test that.
1045 	 */
1046 	bdev->optimal_io_boundary = 15;
1047 	g_io_done = false;
1048 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1049 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1050 
1051 	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1052 	CU_ASSERT(rc == 0);
1053 	CU_ASSERT(g_io_done == false);
1054 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1055 	stub_complete_io(1);
1056 	CU_ASSERT(g_io_done == true);
1057 
1058 	/* Test an UNMAP.  This should also not be split. */
1059 	bdev->optimal_io_boundary = 16;
1060 	g_io_done = false;
1061 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1062 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1063 
1064 	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1065 	CU_ASSERT(rc == 0);
1066 	CU_ASSERT(g_io_done == false);
1067 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1068 	stub_complete_io(1);
1069 	CU_ASSERT(g_io_done == true);
1070 
1071 	/* Test a FLUSH.  This should also not be split. */
1072 	bdev->optimal_io_boundary = 16;
1073 	g_io_done = false;
1074 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1075 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1076 
1077 	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1078 	CU_ASSERT(rc == 0);
1079 	CU_ASSERT(g_io_done == false);
1080 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1081 	stub_complete_io(1);
1082 	CU_ASSERT(g_io_done == true);
1083 
1084 	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1085 
1086 	spdk_put_io_channel(io_ch);
1087 	spdk_bdev_close(desc);
1088 	free_bdev(bdev);
1089 	spdk_bdev_finish(bdev_fini_cb, NULL);
1090 	poll_threads();
1091 }
1092 
/*
 * Verify that a split I/O still makes forward progress when the spdk_bdev_io
 * pool is too small to submit all children at once: children are queued on the
 * management channel's io_wait_queue and submitted one at a time as earlier
 * I/Os complete and return their spdk_bdev_io to the pool.
 */
static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	/* Deliberately tiny pool (2 bdev_ios, cache of 1) so that a split
	 * parent plus one outstanding I/O is enough to exhaust it.
	 */
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 2,
		.bdev_io_cache_size = 1,
	};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Keep one I/O outstanding so the split below cannot get a second
	 * spdk_bdev_io from the pool for its first child.
	 */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O.  This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 *  including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	/* Child 1: blocks 14-15, ends at the first 16-block boundary and spans
	 * the first two iovecs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child 2: blocks 16-31, a full boundary-sized chunk inside iov[1]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child 3: blocks 32-45, the remainder of iov[1] plus all of iov[2]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1227 
/*
 * Exercise the buffer-alignment handling of the bdev layer: when the payload
 * of a read/write does not satisfy the bdev's required_alignment, the I/O must
 * be redirected through the internal bounce iov (orig_iovcnt records the
 * caller's iov count); when the payload is already aligned, or no alignment is
 * required, the caller's buffer must be passed through untouched.
 */
static void
bdev_io_alignment(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 20,
		.bdev_io_cache_size = 2,
	};
	int rc;
	void *buf;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	/* Use the stub that records the iovs seen at submission time so the
	 * test can check their alignment after completion.
	 */
	fn_table.submit_request = stub_submit_request_aligned_buffer;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 8192);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Pass aligned single buffer with no alignment required */
	alignment = 1;
	/* required_alignment is stored as log2 of the byte alignment */
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	/* Pass unaligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	/* No bounce buffer expected (orig_iovcnt == 0): caller's pointer is
	 * used directly even though it is misaligned.
	 */
	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	/* Pass unaligned single buffer with 512 alignment required */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);

	/* Misaligned payload + required alignment => bounce iov is used while
	 * the I/O is in flight, and released on completion (orig_iovcnt drops
	 * back to 0).
	 */
	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iov with 2048 alignment required */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	/* Multi-element misaligned iov is collapsed into the single bounce
	 * iov; orig_iovcnt preserves the caller's original element count.
	 */
	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass iov without allocated buffer without alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	/* NULL payload: the bdev layer allocates the buffer itself, so no
	 * bounce iov is needed and the result is already aligned.
	 */
	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass iov without allocated buffer with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}
1445 
1446 static void
1447 histogram_status_cb(void *cb_arg, int status)
1448 {
1449 	g_status = status;
1450 }
1451 
1452 static void
1453 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
1454 {
1455 	g_status = status;
1456 	g_histogram = histogram;
1457 }
1458 
1459 static void
1460 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
1461 		   uint64_t total, uint64_t so_far)
1462 {
1463 	g_count += count;
1464 }
1465 
/*
 * Verify enable/collect/disable of per-bdev latency histograms: a freshly
 * enabled histogram is empty, completed I/Os are counted, and querying a
 * disabled histogram fails with -EFAULT.
 */
static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);

	ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(ch != NULL);

	/* Enable histogram */
	g_status = -1;
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);

	/* Allocate histogram */
	histogram = spdk_histogram_data_alloc();
	SPDK_CU_ASSERT_FATAL(histogram != NULL);

	/* Check if histogram is zeroed */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	/* No I/O has completed yet, so every bucket must be empty. */
	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);

	CU_ASSERT(g_count == 0);

	rc = spdk_bdev_write_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Advance the mocked clock before completing so the I/O is recorded
	 * with a non-zero latency.
	 */
	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	rc = spdk_bdev_read_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	/* Check if histogram gathered data from all I/O channels */
	g_histogram = NULL;
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	/* One write + one read completed above => total bucket count is 2. */
	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
	CU_ASSERT(g_count == 2);

	/* Disable histogram */
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == false);

	/* Try to run histogram commands on disabled bdev */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == -EFAULT);

	spdk_histogram_data_free(g_histogram);
	spdk_put_io_channel(ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1553 
1554 int
1555 main(int argc, char **argv)
1556 {
1557 	CU_pSuite		suite = NULL;
1558 	unsigned int		num_failures;
1559 
1560 	if (CU_initialize_registry() != CUE_SUCCESS) {
1561 		return CU_get_error();
1562 	}
1563 
1564 	suite = CU_add_suite("bdev", null_init, null_clean);
1565 	if (suite == NULL) {
1566 		CU_cleanup_registry();
1567 		return CU_get_error();
1568 	}
1569 
1570 	if (
1571 		CU_add_test(suite, "bytes_to_blocks_test", bytes_to_blocks_test) == NULL ||
1572 		CU_add_test(suite, "num_blocks_test", num_blocks_test) == NULL ||
1573 		CU_add_test(suite, "io_valid", io_valid_test) == NULL ||
1574 		CU_add_test(suite, "open_write", open_write_test) == NULL ||
1575 		CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
1576 		CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
1577 		CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
1578 		CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
1579 		CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
1580 		CU_add_test(suite, "bdev_io_split_with_io_wait", bdev_io_split_with_io_wait) == NULL ||
1581 		CU_add_test(suite, "bdev_io_alignment", bdev_io_alignment) == NULL ||
1582 		CU_add_test(suite, "bdev_histograms", bdev_histograms) == NULL
1583 	) {
1584 		CU_cleanup_registry();
1585 		return CU_get_error();
1586 	}
1587 
1588 	allocate_threads(1);
1589 	set_thread(0);
1590 
1591 	CU_basic_set_mode(CU_BRM_VERBOSE);
1592 	CU_basic_run_tests();
1593 	num_failures = CU_get_number_of_failures();
1594 	CU_cleanup_registry();
1595 
1596 	free_threads();
1597 
1598 	return num_failures;
1599 }
1600