xref: /spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c (revision c4d9daeb7bf491bc0eb6e8d417b75d44773cb009)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/ut_multithread.c"
37 #include "unit/lib/json_mock.c"
38 
39 #include "spdk/config.h"
40 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
41 #undef SPDK_CONFIG_VTUNE
42 
43 #include "bdev/bdev.c"
44 
/* bdev.c reads legacy config sections at init time; stub the conf API to
 * report "not found" so initialization always falls back to defaults. */
DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
50 
/* Tracing and event notification are side effects these tests do not verify;
 * provide no-op stubs so bdev.c links without the real trace/notify code. */
struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
63 
64 
/* Scratch state shared between test bodies and their completion callbacks. */
int g_status;
int g_count;
struct spdk_histogram_data *g_histogram;
68 
/* Stub for the SCSI translation helper referenced by bdev.c; no test here
 * inspects translated sense data, so the body is intentionally empty. */
void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}
74 
/* CUnit suite initialization hook; no per-suite setup is needed. */
static int
null_init(void)
{
	return 0;
}
80 
/* CUnit suite cleanup hook; no per-suite teardown is needed. */
static int
null_clean(void)
{
	return 0;
}
86 
/* Destruct callback for test bdevs; the tests free the bdev memory
 * themselves, so there is nothing to do here. */
static int
stub_destruct(void *ctx)
{
	return 0;
}
92 
/*
 * Description of an I/O the test expects the stub backend to receive next.
 * Queued on the channel's expected_io list and checked in stub_submit_request().
 */
struct ut_expected_io {
	uint8_t				type;	/* SPDK_BDEV_IO_TYPE_*; INVALID skips the type check */
	uint64_t			offset;	/* expected offset_blocks */
	uint64_t			length;	/* expected num_blocks; 0 skips all further checks */
	int				iovcnt;	/* expected iov count; 0 for iov-less I/O types */
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	TAILQ_ENTRY(ut_expected_io)	link;
};
101 
/* Per-channel context for the stub backend. */
struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;		/* submitted, not yet completed */
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;		/* expectations queued by tests */
};
107 
static bool g_io_done;				/* set by io_done() when the parent I/O completes */
static struct spdk_bdev_io *g_bdev_io;		/* last I/O seen by stub_submit_request() */
static enum spdk_bdev_io_status g_io_status;	/* status captured by io_done() */
static uint32_t g_bdev_ut_io_device;		/* address used as the io_device handle */
static struct bdev_ut_channel *g_bdev_ut_channel;	/* the single live channel, if any */
113 
114 static struct ut_expected_io *
115 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
116 {
117 	struct ut_expected_io *expected_io;
118 
119 	expected_io = calloc(1, sizeof(*expected_io));
120 	SPDK_CU_ASSERT_FATAL(expected_io != NULL);
121 
122 	expected_io->type = type;
123 	expected_io->offset = offset;
124 	expected_io->length = length;
125 	expected_io->iovcnt = iovcnt;
126 
127 	return expected_io;
128 }
129 
/* Record the iovec the test expects at position 'pos' of the I/O. */
static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}
136 
137 static void
138 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
139 {
140 	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
141 	struct ut_expected_io *expected_io;
142 	struct iovec *iov, *expected_iov;
143 	int i;
144 
145 	g_bdev_io = bdev_io;
146 
147 	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
148 	ch->outstanding_io_count++;
149 
150 	expected_io = TAILQ_FIRST(&ch->expected_io);
151 	if (expected_io == NULL) {
152 		return;
153 	}
154 	TAILQ_REMOVE(&ch->expected_io, expected_io, link);
155 
156 	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
157 		CU_ASSERT(bdev_io->type == expected_io->type);
158 	}
159 
160 	if (expected_io->length == 0) {
161 		free(expected_io);
162 		return;
163 	}
164 
165 	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
166 	CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks);
167 
168 	if (expected_io->iovcnt == 0) {
169 		free(expected_io);
170 		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
171 		return;
172 	}
173 
174 	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
175 	for (i = 0; i < expected_io->iovcnt; i++) {
176 		iov = &bdev_io->u.bdev.iovs[i];
177 		expected_iov = &expected_io->iov[i];
178 		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
179 		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
180 	}
181 
182 	free(expected_io);
183 }
184 
/* get_buf callback: the buffer allocation must have succeeded; then hand the
 * I/O to the normal stub submit path for recording and verification. */
static void
stub_submit_request_aligned_buffer_cb(struct spdk_io_channel *_ch,
				      struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}
193 
/* submit_request variant that first obtains a data buffer via
 * spdk_bdev_io_get_buf(), exercising the buffer-allocation path. */
static void
stub_submit_request_aligned_buffer(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_aligned_buffer_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
200 
201 static uint32_t
202 stub_complete_io(uint32_t num_to_complete)
203 {
204 	struct bdev_ut_channel *ch = g_bdev_ut_channel;
205 	struct spdk_bdev_io *bdev_io;
206 	uint32_t num_completed = 0;
207 
208 	while (num_completed < num_to_complete) {
209 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
210 			break;
211 		}
212 		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
213 		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
214 		ch->outstanding_io_count--;
215 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
216 		num_completed++;
217 	}
218 
219 	return num_completed;
220 }
221 
/* get_io_channel callback: every test bdev shares the single UT io_device. */
static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}
227 
/* Claim support for every I/O type so any request reaches submit_request. */
static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return true;
}
233 
/* Function table shared by all test bdevs.  Non-const on purpose: some tests
 * swap .submit_request (e.g. to stub_submit_request_aligned_buffer). */
static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};
240 
241 static int
242 bdev_ut_create_ch(void *io_device, void *ctx_buf)
243 {
244 	struct bdev_ut_channel *ch = ctx_buf;
245 
246 	CU_ASSERT(g_bdev_ut_channel == NULL);
247 	g_bdev_ut_channel = ch;
248 
249 	TAILQ_INIT(&ch->outstanding_io);
250 	ch->outstanding_io_count = 0;
251 	TAILQ_INIT(&ch->expected_io);
252 	return 0;
253 }
254 
/* io_device channel-destroy callback: clear the global so the next
 * create callback can assert single-channel usage. */
static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}
261 
/* Defined below; declared early so bdev_ut_module_init() can reference it. */
struct spdk_bdev_module bdev_ut_if;
263 
/* Module init: register the shared io_device, then immediately signal async
 * init completion (the module is declared with .async_init = true). */
static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}
272 
/* Module teardown: tear down the io_device registered at init. */
static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}
278 
/* Base test module; async_init exercises the spdk_bdev_module_init_done() path. */
struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};
285 
/* Forward declaration; vbdev_ut_if below references it before its definition. */
static void vbdev_ut_examine(struct spdk_bdev *bdev);
287 
/* Virtual-bdev test module init; nothing to set up. */
static int
vbdev_ut_module_init(void)
{
	return 0;
}
293 
/* Virtual-bdev test module teardown; nothing to clean up. */
static void
vbdev_ut_module_fini(void)
{
}
298 
/* Virtual test module; its examine_config hook lets bdev registration
 * complete for bdevs claimed by a vbdev module. */
struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};
305 
/* Register both test modules with the bdev layer at program load. */
SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
308 
/* examine_config hook: no claiming logic is needed, just acknowledge the
 * examine so bdev registration can proceed. */
static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}
314 
315 static struct spdk_bdev *
316 allocate_bdev(char *name)
317 {
318 	struct spdk_bdev *bdev;
319 	int rc;
320 
321 	bdev = calloc(1, sizeof(*bdev));
322 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
323 
324 	bdev->name = name;
325 	bdev->fn_table = &fn_table;
326 	bdev->module = &bdev_ut_if;
327 	bdev->blockcnt = 1024;
328 	bdev->blocklen = 512;
329 
330 	rc = spdk_bdev_register(bdev);
331 	CU_ASSERT(rc == 0);
332 
333 	return bdev;
334 }
335 
336 static struct spdk_bdev *
337 allocate_vbdev(char *name)
338 {
339 	struct spdk_bdev *bdev;
340 	int rc;
341 
342 	bdev = calloc(1, sizeof(*bdev));
343 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
344 
345 	bdev->name = name;
346 	bdev->fn_table = &fn_table;
347 	bdev->module = &vbdev_ut_if;
348 
349 	rc = spdk_bdev_register(bdev);
350 	CU_ASSERT(rc == 0);
351 
352 	return bdev;
353 }
354 
/* Unregister a bdev created by allocate_bdev(), drain the asynchronous
 * unregister, then poison the memory with 0xFF to catch use-after-free. */
static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}
363 
/*
 * Release a bdev created by allocate_vbdev().  The teardown sequence
 * (unregister, drain, poison, free) is identical to free_bdev(), so delegate
 * to it instead of duplicating the code.
 */
static void
free_vbdev(struct spdk_bdev *bdev)
{
	free_bdev(bdev);
}
372 
/* Completion callback for spdk_bdev_get_device_stat(): verify the bdev and
 * status, release the resources allocated by the test, and signal done.
 * Takes ownership of 'stat' and the bdev itself. */
static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	free_bdev(bdev);

	/* Tell the polling loop in get_device_stat_test() we are finished. */
	*(bool *)cb_arg = true;
}
388 
389 static void
390 get_device_stat_test(void)
391 {
392 	struct spdk_bdev *bdev;
393 	struct spdk_bdev_io_stat *stat;
394 	bool done;
395 
396 	bdev = allocate_bdev("bdev0");
397 	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
398 	if (stat == NULL) {
399 		free_bdev(bdev);
400 		return;
401 	}
402 
403 	done = false;
404 	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
405 	while (!done) { poll_threads(); }
406 
407 
408 }
409 
410 static void
411 open_write_test(void)
412 {
413 	struct spdk_bdev *bdev[9];
414 	struct spdk_bdev_desc *desc[9] = {};
415 	int rc;
416 
417 	/*
418 	 * Create a tree of bdevs to test various open w/ write cases.
419 	 *
420 	 * bdev0 through bdev3 are physical block devices, such as NVMe
421 	 * namespaces or Ceph block devices.
422 	 *
423 	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
424 	 * caching or RAID use cases.
425 	 *
426 	 * bdev5 through bdev7 are all virtual bdevs with the same base
427 	 * bdev (except bdev7). This models partitioning or logical volume
428 	 * use cases.
429 	 *
430 	 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
431 	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
432 	 * models caching, RAID, partitioning or logical volumes use cases.
433 	 *
434 	 * bdev8 is a virtual bdev with multiple base bdevs, but these
435 	 * base bdevs are themselves virtual bdevs.
436 	 *
437 	 *                bdev8
438 	 *                  |
439 	 *            +----------+
440 	 *            |          |
441 	 *          bdev4      bdev5   bdev6   bdev7
442 	 *            |          |       |       |
443 	 *        +---+---+      +---+   +   +---+---+
444 	 *        |       |           \  |  /         \
445 	 *      bdev0   bdev1          bdev2         bdev3
446 	 */
447 
448 	bdev[0] = allocate_bdev("bdev0");
449 	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
450 	CU_ASSERT(rc == 0);
451 
452 	bdev[1] = allocate_bdev("bdev1");
453 	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
454 	CU_ASSERT(rc == 0);
455 
456 	bdev[2] = allocate_bdev("bdev2");
457 	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
458 	CU_ASSERT(rc == 0);
459 
460 	bdev[3] = allocate_bdev("bdev3");
461 	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
462 	CU_ASSERT(rc == 0);
463 
464 	bdev[4] = allocate_vbdev("bdev4");
465 	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
466 	CU_ASSERT(rc == 0);
467 
468 	bdev[5] = allocate_vbdev("bdev5");
469 	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
470 	CU_ASSERT(rc == 0);
471 
472 	bdev[6] = allocate_vbdev("bdev6");
473 
474 	bdev[7] = allocate_vbdev("bdev7");
475 
476 	bdev[8] = allocate_vbdev("bdev8");
477 
478 	/* Open bdev0 read-only.  This should succeed. */
479 	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
480 	CU_ASSERT(rc == 0);
481 	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
482 	spdk_bdev_close(desc[0]);
483 
484 	/*
485 	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
486 	 * by a vbdev module.
487 	 */
488 	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
489 	CU_ASSERT(rc == -EPERM);
490 
491 	/*
492 	 * Open bdev4 read/write.  This should fail since bdev3 has been claimed
493 	 * by a vbdev module.
494 	 */
495 	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
496 	CU_ASSERT(rc == -EPERM);
497 
498 	/* Open bdev4 read-only.  This should succeed. */
499 	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
500 	CU_ASSERT(rc == 0);
501 	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
502 	spdk_bdev_close(desc[4]);
503 
504 	/*
505 	 * Open bdev8 read/write.  This should succeed since it is a leaf
506 	 * bdev.
507 	 */
508 	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
509 	CU_ASSERT(rc == 0);
510 	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
511 	spdk_bdev_close(desc[8]);
512 
513 	/*
514 	 * Open bdev5 read/write.  This should fail since bdev4 has been claimed
515 	 * by a vbdev module.
516 	 */
517 	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
518 	CU_ASSERT(rc == -EPERM);
519 
520 	/* Open bdev4 read-only.  This should succeed. */
521 	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
522 	CU_ASSERT(rc == 0);
523 	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
524 	spdk_bdev_close(desc[5]);
525 
526 	free_vbdev(bdev[8]);
527 
528 	free_vbdev(bdev[5]);
529 	free_vbdev(bdev[6]);
530 	free_vbdev(bdev[7]);
531 
532 	free_vbdev(bdev[4]);
533 
534 	free_bdev(bdev[0]);
535 	free_bdev(bdev[1]);
536 	free_bdev(bdev[2]);
537 	free_bdev(bdev[3]);
538 }
539 
/* Verify spdk_bdev_bytes_to_blocks() for both power-of-two and
 * non-power-of-two block sizes: exact multiples convert successfully,
 * misaligned offsets or lengths are rejected. */
static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen not the power of two */
	bdev.blocklen = 100;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}
575 
576 static void
577 num_blocks_test(void)
578 {
579 	struct spdk_bdev bdev;
580 	struct spdk_bdev_desc *desc = NULL;
581 	int rc;
582 
583 	memset(&bdev, 0, sizeof(bdev));
584 	bdev.name = "num_blocks";
585 	bdev.fn_table = &fn_table;
586 	bdev.module = &bdev_ut_if;
587 	spdk_bdev_register(&bdev);
588 	spdk_bdev_notify_blockcnt_change(&bdev, 50);
589 
590 	/* Growing block number */
591 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
592 	/* Shrinking block number */
593 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);
594 
595 	/* In case bdev opened */
596 	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
597 	CU_ASSERT(rc == 0);
598 	SPDK_CU_ASSERT_FATAL(desc != NULL);
599 
600 	/* Growing block number */
601 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
602 	/* Shrinking block number */
603 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);
604 
605 	spdk_bdev_close(desc);
606 	spdk_bdev_unregister(&bdev, NULL, NULL);
607 
608 	poll_threads();
609 }
610 
/* Verify spdk_bdev_io_valid_blocks() boundary handling on a 100-block bdev,
 * including the uint64_t offset+length overflow case. */
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}
636 
/* Verify bdev alias management: duplicate/empty aliases are rejected,
 * aliases are unique across all registered bdevs, the bdev's own name cannot
 * be deleted as an alias, and del_all empties the alias list. */
static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Trying adding an alias identical to name.
	 * Alias is identical to name, so it can not be added to aliases list
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Trying to add empty alias,
	 * this one should fail
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Trying adding same alias to two different registered bdevs */

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying removing an alias from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Trying to del all alias from empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Trying to del all alias from non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
724 
/* Generic I/O completion callback: record completion and final status in
 * the globals the tests assert on, then release the bdev_io. */
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}
732 
/* spdk_bdev_initialize() completion callback: init must succeed. */
static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}
738 
/* spdk_bdev_finish() completion callback; nothing to verify. */
static void
bdev_fini_cb(void *arg)
{
}
743 
/* Wrapper around spdk_bdev_io_wait_entry carrying the context io_wait_cb()
 * needs to retry the read, plus a flag the test polls for. */
struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;	/* set once the retried I/O is accepted */
};
750 
/* io_wait callback: a bdev_io has been freed, so the retried read must now
 * be accepted (no -ENOMEM). */
static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}
761 
/* Exhaust a 4-entry bdev_io pool, verify the fifth submission fails with
 * -ENOMEM, then verify spdk_bdev_queue_io_wait() resubmits queued waiters
 * one at a time as completions return bdev_ios to the pool. */
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Submit exactly pool_size reads to drain the bdev_io pool. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	/* The pool is empty, so the next submission must be rejected. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	/* Each completion frees one bdev_io, resubmitting one waiter, so the
	 * outstanding count stays pinned at 4. */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
839 
/* Verify _spdk_bdev_io_should_split(): no boundary configured, non-LBA I/O
 * (RESET), an I/O ending exactly at the boundary, and one crossing it. */
static void
bdev_io_spans_boundary_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;

	memset(&bdev, 0, sizeof(bdev));

	bdev.optimal_io_boundary = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary set - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == true);
}
872 
873 static void
874 bdev_io_split(void)
875 {
876 	struct spdk_bdev *bdev;
877 	struct spdk_bdev_desc *desc = NULL;
878 	struct spdk_io_channel *io_ch;
879 	struct spdk_bdev_opts bdev_opts = {
880 		.bdev_io_pool_size = 512,
881 		.bdev_io_cache_size = 64,
882 	};
883 	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
884 	struct ut_expected_io *expected_io;
885 	uint64_t i;
886 	int rc;
887 
888 	rc = spdk_bdev_set_opts(&bdev_opts);
889 	CU_ASSERT(rc == 0);
890 	spdk_bdev_initialize(bdev_init_cb, NULL);
891 
892 	bdev = allocate_bdev("bdev0");
893 
894 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
895 	CU_ASSERT(rc == 0);
896 	SPDK_CU_ASSERT_FATAL(desc != NULL);
897 	io_ch = spdk_bdev_get_io_channel(desc);
898 	CU_ASSERT(io_ch != NULL);
899 
900 	bdev->optimal_io_boundary = 16;
901 	bdev->split_on_optimal_io_boundary = false;
902 
903 	g_io_done = false;
904 
905 	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
906 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
907 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
908 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
909 
910 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
911 	CU_ASSERT(rc == 0);
912 	CU_ASSERT(g_io_done == false);
913 
914 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
915 	stub_complete_io(1);
916 	CU_ASSERT(g_io_done == true);
917 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
918 
919 	bdev->split_on_optimal_io_boundary = true;
920 
921 	/* Now test that a single-vector command is split correctly.
922 	 * Offset 14, length 8, payload 0xF000
923 	 *  Child - Offset 14, length 2, payload 0xF000
924 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
925 	 *
926 	 * Set up the expected values before calling spdk_bdev_read_blocks
927 	 */
928 	g_io_done = false;
929 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
930 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
931 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
932 
933 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
934 	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
935 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
936 
937 	/* spdk_bdev_read_blocks will submit the first child immediately. */
938 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
939 	CU_ASSERT(rc == 0);
940 	CU_ASSERT(g_io_done == false);
941 
942 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
943 	stub_complete_io(2);
944 	CU_ASSERT(g_io_done == true);
945 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
946 
947 	/* Now set up a more complex, multi-vector command that needs to be split,
948 	 *  including splitting iovecs.
949 	 */
950 	iov[0].iov_base = (void *)0x10000;
951 	iov[0].iov_len = 512;
952 	iov[1].iov_base = (void *)0x20000;
953 	iov[1].iov_len = 20 * 512;
954 	iov[2].iov_base = (void *)0x30000;
955 	iov[2].iov_len = 11 * 512;
956 
957 	g_io_done = false;
958 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
959 	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
960 	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
961 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
962 
963 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
964 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
965 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
966 
967 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
968 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
969 	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
970 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
971 
972 	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
973 	CU_ASSERT(rc == 0);
974 	CU_ASSERT(g_io_done == false);
975 
976 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
977 	stub_complete_io(3);
978 	CU_ASSERT(g_io_done == true);
979 
980 	/* Test multi vector command that needs to be split by strip and then needs to be
981 	 * split further due to the capacity of child iovs.
982 	 */
983 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
984 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
985 		iov[i].iov_len = 512;
986 	}
987 
988 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
989 	g_io_done = false;
990 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
991 					   BDEV_IO_NUM_CHILD_IOV);
992 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
993 		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
994 	}
995 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
996 
997 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
998 					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
999 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1000 		ut_expected_io_set_iov(expected_io, i,
1001 				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
1002 	}
1003 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1004 
1005 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1006 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1007 	CU_ASSERT(rc == 0);
1008 	CU_ASSERT(g_io_done == false);
1009 
1010 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1011 	stub_complete_io(1);
1012 	CU_ASSERT(g_io_done == false);
1013 
1014 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1015 	stub_complete_io(1);
1016 	CU_ASSERT(g_io_done == true);
1017 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1018 
1019 	/* Test multi vector command that needs to be split by strip and then needs to be
1020 	 * split further due to the capacity of child iovs, but fails to split. The cause
1021 	 * of failure of split is that the length of an iovec is not multiple of block size.
1022 	 */
1023 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1024 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1025 		iov[i].iov_len = 512;
1026 	}
1027 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
1028 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1029 
1030 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
1031 	g_io_done = false;
1032 	g_io_status = 0;
1033 
1034 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1035 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1036 	CU_ASSERT(rc == 0);
1037 	CU_ASSERT(g_io_done == true);
1038 	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1039 
1040 	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
1041 	 * split, so test that.
1042 	 */
1043 	bdev->optimal_io_boundary = 15;
1044 	g_io_done = false;
1045 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1046 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1047 
1048 	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1049 	CU_ASSERT(rc == 0);
1050 	CU_ASSERT(g_io_done == false);
1051 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1052 	stub_complete_io(1);
1053 	CU_ASSERT(g_io_done == true);
1054 
1055 	/* Test an UNMAP.  This should also not be split. */
1056 	bdev->optimal_io_boundary = 16;
1057 	g_io_done = false;
1058 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1059 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1060 
1061 	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1062 	CU_ASSERT(rc == 0);
1063 	CU_ASSERT(g_io_done == false);
1064 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1065 	stub_complete_io(1);
1066 	CU_ASSERT(g_io_done == true);
1067 
1068 	/* Test a FLUSH.  This should also not be split. */
1069 	bdev->optimal_io_boundary = 16;
1070 	g_io_done = false;
1071 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1072 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1073 
1074 	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1075 	CU_ASSERT(rc == 0);
1076 	CU_ASSERT(g_io_done == false);
1077 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1078 	stub_complete_io(1);
1079 	CU_ASSERT(g_io_done == true);
1080 
1081 	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1082 
1083 	spdk_put_io_channel(io_ch);
1084 	spdk_bdev_close(desc);
1085 	free_bdev(bdev);
1086 	spdk_bdev_finish(bdev_fini_cb, NULL);
1087 	poll_threads();
1088 }
1089 
/*
 * Verify that I/O splitting interacts correctly with spdk_bdev_io pool
 * exhaustion: when no spdk_bdev_io is available for a child I/O, the child
 * must be queued on the management channel's io_wait_queue and submitted
 * later as bdev_ios are returned to the pool.
 */
static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	/* Deliberately tiny pool (2 I/Os, cache of 1) so split children must
	 * wait for bdev_ios to be freed.
	 */
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 2,
		.bdev_io_cache_size = 1,
	};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Consume one bdev_io from the pool up front so the split below runs
	 * short of bdev_ios.
	 */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O.  This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 *  including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	/* First child: last block of iov[0] plus first block of iov[1] (2 iovs). */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Second child: a full 16-block boundary region from the middle of iov[1]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Third child: remainder of iov[1] plus all of iov[2]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1224 
/*
 * Verify buffer-alignment handling: when a payload does not satisfy
 * bdev->required_alignment, the bdev layer must substitute an aligned bounce
 * buffer (visible as internal.bounce_iov with internal.orig_iovcnt recording
 * the original iov count) and clear that bookkeeping on completion.
 * Payloads that already satisfy the requirement must pass through untouched
 * (orig_iovcnt stays 0 and the original iov_base is submitted).
 */
static void
bdev_io_alignment(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 20,
		.bdev_io_cache_size = 2,
	};
	int rc;
	void *buf;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	/* Use the stub that presumably records the submitted request in
	 * g_bdev_io so its iovs can be inspected below — confirm against the
	 * stub's definition earlier in this file.
	 */
	fn_table.submit_request = stub_submit_request_aligned_buffer;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 8192);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Pass aligned single buffer with no alignment required */
	alignment = 1;
	/* required_alignment is stored as log2 of the byte alignment. */
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	/* Pass unaligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	/* No bounce buffer expected: orig_iovcnt stays 0 and the caller's
	 * pointer is submitted as-is.
	 */
	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	/* Pass unaligned single buffer with 512 alignment required */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);

	/* Bounce buffer expected: iovs redirected to internal.bounce_iov and
	 * orig_iovcnt records the one original iov; cleared after completion.
	 */
	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iov with 2048 alignment required */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	/* Both original iovs must be coalesced into a single bounce iov. */
	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass iov without allocated buffer without alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	/* NULL iov_base: the bdev layer allocates the data buffer itself, so
	 * no bounce bookkeeping is expected and the result must be aligned.
	 */
	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass iov without allocated buffer with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}
1442 
1443 static void
1444 histogram_status_cb(void *cb_arg, int status)
1445 {
1446 	g_status = status;
1447 }
1448 
1449 static void
1450 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
1451 {
1452 	g_status = status;
1453 	g_histogram = histogram;
1454 }
1455 
1456 static void
1457 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
1458 		   uint64_t total, uint64_t so_far)
1459 {
1460 	g_count += count;
1461 }
1462 
/*
 * Exercise the per-bdev latency histogram: enable it, verify a freshly
 * queried histogram is empty, issue one write and one read (each with a
 * simulated 10 us latency), verify both I/Os are counted, then disable the
 * histogram and verify further queries fail with -EFAULT.
 */
static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);

	ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(ch != NULL);

	/* Enable histogram */
	g_status = -1;
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);

	/* Allocate histogram */
	histogram = spdk_histogram_data_alloc();
	SPDK_CU_ASSERT_FATAL(histogram != NULL);

	/* Check if histogram is zeroed */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	/* Sum all bucket counts via histogram_io_count; no I/O yet, so the
	 * total must be zero.
	 */
	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);

	CU_ASSERT(g_count == 0);

	rc = spdk_bdev_write_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Advance the mock clock so the completed I/O has nonzero latency. */
	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	rc = spdk_bdev_read_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	/* Check if histogram gathered data from all I/O channels */
	g_histogram = NULL;
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	/* Exactly the write and the read above should be counted. */
	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
	CU_ASSERT(g_count == 2);

	/* Disable histogram */
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == false);

	/* Try to run histogram commands on disabled bdev */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == -EFAULT);

	spdk_histogram_data_free(g_histogram);
	spdk_put_io_channel(ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1550 
1551 int
1552 main(int argc, char **argv)
1553 {
1554 	CU_pSuite		suite = NULL;
1555 	unsigned int		num_failures;
1556 
1557 	if (CU_initialize_registry() != CUE_SUCCESS) {
1558 		return CU_get_error();
1559 	}
1560 
1561 	suite = CU_add_suite("bdev", null_init, null_clean);
1562 	if (suite == NULL) {
1563 		CU_cleanup_registry();
1564 		return CU_get_error();
1565 	}
1566 
1567 	if (
1568 		CU_add_test(suite, "bytes_to_blocks_test", bytes_to_blocks_test) == NULL ||
1569 		CU_add_test(suite, "num_blocks_test", num_blocks_test) == NULL ||
1570 		CU_add_test(suite, "io_valid", io_valid_test) == NULL ||
1571 		CU_add_test(suite, "open_write", open_write_test) == NULL ||
1572 		CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
1573 		CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
1574 		CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
1575 		CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
1576 		CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
1577 		CU_add_test(suite, "bdev_io_split_with_io_wait", bdev_io_split_with_io_wait) == NULL ||
1578 		CU_add_test(suite, "bdev_io_alignment", bdev_io_alignment) == NULL ||
1579 		CU_add_test(suite, "bdev_histograms", bdev_histograms) == NULL
1580 	) {
1581 		CU_cleanup_registry();
1582 		return CU_get_error();
1583 	}
1584 
1585 	allocate_threads(1);
1586 	set_thread(0);
1587 
1588 	CU_basic_set_mode(CU_BRM_VERBOSE);
1589 	CU_basic_run_tests();
1590 	num_failures = CU_get_number_of_failures();
1591 	CU_cleanup_registry();
1592 
1593 	free_threads();
1594 
1595 	return num_failures;
1596 }
1597