xref: /spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c (revision 552e21cce6cccbf833ed9109827e08337377d7ce)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/ut_multithread.c"
37 #include "unit/lib/json_mock.c"
38 
39 #include "spdk/config.h"
40 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
41 #undef SPDK_CONFIG_VTUNE
42 
43 #include "bdev/bdev.c"
44 
/* Stub out legacy config-file parsing; the unit test never loads a config,
 * so every lookup returns its "not found" default. */
DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

/* Tracing is a no-op here; bdev.c only needs these symbols to link. */
struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name, const char *short_name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_is_ptr, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));

/* Shared state available to test callbacks. */
int g_status;
int g_count;
struct spdk_histogram_data *g_histogram;
65 
/* Stub for the SCSI/NVMe status translation bdev.c links against;
 * the translated status values are irrelevant to these tests. */
void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}
71 
/* CUnit suite-init hook; no per-suite setup is required. */
static int
null_init(void)
{
	return 0;
}
77 
/* CUnit suite-cleanup hook; no per-suite teardown is required. */
static int
null_clean(void)
{
	return 0;
}
83 
/* Destruct callback for the stub bdevs; there is nothing to release. */
static int
stub_destruct(void *ctx)
{
	return 0;
}
89 
/* Description of one I/O the test expects stub_submit_request() to receive.
 * Entries are queued on bdev_ut_channel.expected_io and checked in FIFO order. */
struct ut_expected_io {
	uint8_t				type;	/* SPDK_BDEV_IO_TYPE_*; INVALID skips the type check */
	uint64_t			offset;	/* expected offset_blocks */
	uint64_t			length;	/* expected num_blocks; 0 skips offset/length/iov checks */
	int				iovcnt;	/* expected iovec count; 0 skips the iov comparison */
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	TAILQ_ENTRY(ut_expected_io)	link;
};
98 
/* Per-channel test context: I/Os submitted but not yet completed via
 * stub_complete_io(), plus the FIFO of expected I/O descriptions. */
struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};
104 
static bool g_io_done;				/* set by io_done() when an I/O completes */
static struct spdk_bdev_io *g_bdev_io;		/* last I/O seen by stub_submit_request() */
static enum spdk_bdev_io_status g_io_status;	/* status captured by io_done() */
static uint32_t g_bdev_ut_io_device;		/* io_device handle; only its address matters */
static struct bdev_ut_channel *g_bdev_ut_channel;	/* channel created by bdev_ut_create_ch() */
110 
111 static struct ut_expected_io *
112 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
113 {
114 	struct ut_expected_io *expected_io;
115 
116 	expected_io = calloc(1, sizeof(*expected_io));
117 	SPDK_CU_ASSERT_FATAL(expected_io != NULL);
118 
119 	expected_io->type = type;
120 	expected_io->offset = offset;
121 	expected_io->length = length;
122 	expected_io->iovcnt = iovcnt;
123 
124 	return expected_io;
125 }
126 
127 static void
128 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
129 {
130 	expected_io->iov[pos].iov_base = base;
131 	expected_io->iov[pos].iov_len = len;
132 }
133 
134 static void
135 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
136 {
137 	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
138 	struct ut_expected_io *expected_io;
139 	struct iovec *iov, *expected_iov;
140 	int i;
141 
142 	g_bdev_io = bdev_io;
143 
144 	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
145 	ch->outstanding_io_count++;
146 
147 	expected_io = TAILQ_FIRST(&ch->expected_io);
148 	if (expected_io == NULL) {
149 		return;
150 	}
151 	TAILQ_REMOVE(&ch->expected_io, expected_io, link);
152 
153 	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
154 		CU_ASSERT(bdev_io->type == expected_io->type);
155 	}
156 
157 	if (expected_io->length == 0) {
158 		free(expected_io);
159 		return;
160 	}
161 
162 	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
163 	CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks);
164 
165 	if (expected_io->iovcnt == 0) {
166 		free(expected_io);
167 		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
168 		return;
169 	}
170 
171 	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
172 	for (i = 0; i < expected_io->iovcnt; i++) {
173 		iov = &bdev_io->u.bdev.iovs[i];
174 		expected_iov = &expected_io->iov[i];
175 		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
176 		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
177 	}
178 
179 	free(expected_io);
180 }
181 
/* submit_request variant that first obtains a buffer via
 * spdk_bdev_io_get_buf() and then forwards to stub_submit_request(). */
static void
stub_submit_request_aligned_buffer(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
188 
189 static uint32_t
190 stub_complete_io(uint32_t num_to_complete)
191 {
192 	struct bdev_ut_channel *ch = g_bdev_ut_channel;
193 	struct spdk_bdev_io *bdev_io;
194 	uint32_t num_completed = 0;
195 
196 	while (num_completed < num_to_complete) {
197 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
198 			break;
199 		}
200 		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
201 		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
202 		ch->outstanding_io_count--;
203 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
204 		num_completed++;
205 	}
206 
207 	return num_completed;
208 }
209 
/* get_io_channel callback: all stub bdevs share the one test io_device. */
static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}
215 
/* The stub bdev claims support for every I/O type. */
static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return true;
}
221 
/* Function table shared by every bdev/vbdev the tests create. */
static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};
228 
229 static int
230 bdev_ut_create_ch(void *io_device, void *ctx_buf)
231 {
232 	struct bdev_ut_channel *ch = ctx_buf;
233 
234 	CU_ASSERT(g_bdev_ut_channel == NULL);
235 	g_bdev_ut_channel = ch;
236 
237 	TAILQ_INIT(&ch->outstanding_io);
238 	ch->outstanding_io_count = 0;
239 	TAILQ_INIT(&ch->expected_io);
240 	return 0;
241 }
242 
/* io_device channel-destroy callback: clear the global channel pointer. */
static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}
249 
/* Module init: register the test io_device so channels can be created. */
static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	return 0;
}
257 
/* Module teardown: drop the test io_device registered in module_init. */
static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}
263 
/* Module that owns the plain (base) bdevs created by allocate_bdev(). */
struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
};
269 
/* Forward declaration: vbdev_ut_if below references the examine callback. */
static void vbdev_ut_examine(struct spdk_bdev *bdev);

/* Virtual-bdev module init; nothing to set up. */
static int
vbdev_ut_module_init(void)
{
	return 0;
}
277 
/* Virtual-bdev module teardown; nothing to clean up. */
static void
vbdev_ut_module_fini(void)
{
}
282 
/* Module that owns the virtual bdevs created by allocate_vbdev(). */
struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};
289 
/* Register both test modules with the bdev layer. */
SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(&vbdev_ut_if)
292 
/* examine_config callback: claim nothing, just signal that examination of
 * the newly-registered bdev is complete so registration can proceed. */
static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}
298 
299 static struct spdk_bdev *
300 allocate_bdev(char *name)
301 {
302 	struct spdk_bdev *bdev;
303 	int rc;
304 
305 	bdev = calloc(1, sizeof(*bdev));
306 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
307 
308 	bdev->name = name;
309 	bdev->fn_table = &fn_table;
310 	bdev->module = &bdev_ut_if;
311 	bdev->blockcnt = 1024;
312 	bdev->blocklen = 512;
313 
314 	rc = spdk_bdev_register(bdev);
315 	CU_ASSERT(rc == 0);
316 
317 	return bdev;
318 }
319 
320 static struct spdk_bdev *
321 allocate_vbdev(char *name, struct spdk_bdev *base1, struct spdk_bdev *base2)
322 {
323 	struct spdk_bdev *bdev;
324 	struct spdk_bdev *array[2];
325 	int rc;
326 
327 	bdev = calloc(1, sizeof(*bdev));
328 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
329 
330 	bdev->name = name;
331 	bdev->fn_table = &fn_table;
332 	bdev->module = &vbdev_ut_if;
333 
334 	/* vbdev must have at least one base bdev */
335 	CU_ASSERT(base1 != NULL);
336 
337 	array[0] = base1;
338 	array[1] = base2;
339 
340 	rc = spdk_vbdev_register(bdev, array, base2 == NULL ? 1 : 2);
341 	CU_ASSERT(rc == 0);
342 
343 	return bdev;
344 }
345 
/* Unregister a bdev created by allocate_bdev() and release its memory.
 * The 0xFF poison makes any use-after-free stand out. */
static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}
354 
/* Unregister a vbdev created by allocate_vbdev() and release its memory.
 * The teardown sequence (unregister, poll, poison, free) is identical to
 * free_bdev(), so delegate to it instead of duplicating the code. */
static void
free_vbdev(struct spdk_bdev *bdev)
{
	free_bdev(bdev);
}
363 
/* Completion callback for spdk_bdev_get_device_stat(): verify the bdev is
 * the expected one, free the stat buffer and the bdev itself, then flag
 * completion through cb_arg (a bool owned by the test). */
static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	free_bdev(bdev);

	*(bool *)cb_arg = true;
}
379 
/* Exercise spdk_bdev_get_device_stat(); get_device_stat_cb() validates the
 * result and frees both the stat buffer and the bdev. */
static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		/* Out of memory: skip the test quietly rather than crash. */
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	/* The callback fires asynchronously; poll until it sets 'done'. */
	while (!done) { poll_threads(); }


}
400 
/* Verify open-for-write rules across a tree of bdevs: a bdev claimed by a
 * module can only be opened read-only, while unclaimed leaf bdevs may be
 * opened read/write. */
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7). This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |           \  |  /         \
	 *      bdev0   bdev1          bdev2         bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4", bdev[0], bdev[1]);
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5", bdev[2], NULL);
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6", bdev[2], NULL);

	bdev[7] = allocate_vbdev("bdev7", bdev[2], bdev[3]);

	bdev[8] = allocate_vbdev("bdev8", bdev[4], bdev[5]);

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}
530 
/* Verify spdk_bdev_bytes_to_blocks(): byte offsets/lengths convert cleanly
 * when they are block-multiples and are rejected otherwise, for both
 * power-of-two and non-power-of-two block sizes. */
static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen not the power of two */
	bdev.blocklen = 100;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}
566 
567 static void
568 num_blocks_test(void)
569 {
570 	struct spdk_bdev bdev;
571 	struct spdk_bdev_desc *desc = NULL;
572 	int rc;
573 
574 	memset(&bdev, 0, sizeof(bdev));
575 	bdev.name = "num_blocks";
576 	bdev.fn_table = &fn_table;
577 	bdev.module = &bdev_ut_if;
578 	spdk_bdev_register(&bdev);
579 	spdk_bdev_notify_blockcnt_change(&bdev, 50);
580 
581 	/* Growing block number */
582 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
583 	/* Shrinking block number */
584 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);
585 
586 	/* In case bdev opened */
587 	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
588 	CU_ASSERT(rc == 0);
589 	SPDK_CU_ASSERT_FATAL(desc != NULL);
590 
591 	/* Growing block number */
592 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
593 	/* Shrinking block number */
594 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);
595 
596 	spdk_bdev_close(desc);
597 	spdk_bdev_unregister(&bdev, NULL, NULL);
598 
599 	poll_threads();
600 }
601 
/* Verify spdk_bdev_io_valid_blocks() range checking on a 100-block bdev,
 * including the uint64_t wrap-around boundary. */
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}
627 
/* Verify alias management: duplicate/empty aliases are rejected, aliases are
 * unique across bdevs, and deleting by alias (but never by name) works. */
static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the name.
	 * An alias identical to the name cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Try adding an empty alias;
	 * this one should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Trying to del all alias from empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Trying to del all alias from non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
715 
/* Generic I/O completion callback: record completion and status in the
 * globals, then release the bdev_io. */
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}
723 
/* spdk_bdev_initialize() completion callback: init must succeed. */
static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}
729 
/* spdk_bdev_finish() completion callback; nothing to verify. */
static void
bdev_fini_cb(void *arg)
{
}
734 
/* Pairs an spdk_bdev_io_wait_entry with the context io_wait_cb() needs to
 * resubmit an I/O once a bdev_io becomes available again. */
struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;	/* set once the retried read is submitted */
};
741 
/* Called when a bdev_io is available again: retry the read that previously
 * failed with -ENOMEM and mark the entry as submitted. */
static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}
752 
/* Exhaust a deliberately tiny bdev_io pool (4 entries), confirm the next
 * submission fails with -ENOMEM, then verify spdk_bdev_queue_io_wait()
 * resubmits queued waiters one at a time as completions free bdev_ios. */
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Use up the entire 4-entry bdev_io pool. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	/* Pool is empty: the fifth submission must fail with -ENOMEM. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	/* Each completion frees one bdev_io, which resubmits one waiter in
	 * FIFO order, keeping the outstanding count at 4. */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
830 
/* Verify _spdk_bdev_io_should_split(): splitting applies only when an
 * optimal_io_boundary is set, never to RESETs, and only when the LBA range
 * actually crosses a boundary. */
static void
bdev_io_spans_boundary_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;

	memset(&bdev, 0, sizeof(bdev));

	bdev.optimal_io_boundary = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary set - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == true);
}
863 
864 static void
865 bdev_io_split(void)
866 {
867 	struct spdk_bdev *bdev;
868 	struct spdk_bdev_desc *desc = NULL;
869 	struct spdk_io_channel *io_ch;
870 	struct spdk_bdev_opts bdev_opts = {
871 		.bdev_io_pool_size = 512,
872 		.bdev_io_cache_size = 64,
873 	};
874 	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
875 	struct ut_expected_io *expected_io;
876 	uint64_t i;
877 	int rc;
878 
879 	rc = spdk_bdev_set_opts(&bdev_opts);
880 	CU_ASSERT(rc == 0);
881 	spdk_bdev_initialize(bdev_init_cb, NULL);
882 
883 	bdev = allocate_bdev("bdev0");
884 
885 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
886 	CU_ASSERT(rc == 0);
887 	SPDK_CU_ASSERT_FATAL(desc != NULL);
888 	io_ch = spdk_bdev_get_io_channel(desc);
889 	CU_ASSERT(io_ch != NULL);
890 
891 	bdev->optimal_io_boundary = 16;
892 	bdev->split_on_optimal_io_boundary = false;
893 
894 	g_io_done = false;
895 
896 	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
897 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
898 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
899 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
900 
901 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
902 	CU_ASSERT(rc == 0);
903 	CU_ASSERT(g_io_done == false);
904 
905 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
906 	stub_complete_io(1);
907 	CU_ASSERT(g_io_done == true);
908 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
909 
910 	bdev->split_on_optimal_io_boundary = true;
911 
912 	/* Now test that a single-vector command is split correctly.
913 	 * Offset 14, length 8, payload 0xF000
914 	 *  Child - Offset 14, length 2, payload 0xF000
915 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
916 	 *
917 	 * Set up the expected values before calling spdk_bdev_read_blocks
918 	 */
919 	g_io_done = false;
920 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
921 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
922 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
923 
924 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
925 	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
926 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
927 
928 	/* spdk_bdev_read_blocks will submit the first child immediately. */
929 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
930 	CU_ASSERT(rc == 0);
931 	CU_ASSERT(g_io_done == false);
932 
933 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
934 	stub_complete_io(2);
935 	CU_ASSERT(g_io_done == true);
936 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
937 
938 	/* Now set up a more complex, multi-vector command that needs to be split,
939 	 *  including splitting iovecs.
940 	 */
941 	iov[0].iov_base = (void *)0x10000;
942 	iov[0].iov_len = 512;
943 	iov[1].iov_base = (void *)0x20000;
944 	iov[1].iov_len = 20 * 512;
945 	iov[2].iov_base = (void *)0x30000;
946 	iov[2].iov_len = 11 * 512;
947 
948 	g_io_done = false;
949 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
950 	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
951 	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
952 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
953 
954 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
955 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
956 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
957 
958 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
959 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
960 	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
961 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
962 
963 	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
964 	CU_ASSERT(rc == 0);
965 	CU_ASSERT(g_io_done == false);
966 
967 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
968 	stub_complete_io(3);
969 	CU_ASSERT(g_io_done == true);
970 
971 	/* Test multi vector command that needs to be split by strip and then needs to be
972 	 * split further due to the capacity of child iovs.
973 	 */
974 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
975 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
976 		iov[i].iov_len = 512;
977 	}
978 
979 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
980 	g_io_done = false;
981 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
982 					   BDEV_IO_NUM_CHILD_IOV);
983 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
984 		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
985 	}
986 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
987 
988 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
989 					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
990 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
991 		ut_expected_io_set_iov(expected_io, i,
992 				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
993 	}
994 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
995 
996 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
997 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
998 	CU_ASSERT(rc == 0);
999 	CU_ASSERT(g_io_done == false);
1000 
1001 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1002 	stub_complete_io(1);
1003 	CU_ASSERT(g_io_done == false);
1004 
1005 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1006 	stub_complete_io(1);
1007 	CU_ASSERT(g_io_done == true);
1008 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1009 
1010 	/* Test multi vector command that needs to be split by strip and then needs to be
1011 	 * split further due to the capacity of child iovs, but fails to split. The cause
1012 	 * of failure of split is that the length of an iovec is not multiple of block size.
1013 	 */
1014 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1015 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1016 		iov[i].iov_len = 512;
1017 	}
1018 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
1019 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1020 
1021 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
1022 	g_io_done = false;
1023 	g_io_status = 0;
1024 
1025 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1026 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1027 	CU_ASSERT(rc == 0);
1028 	CU_ASSERT(g_io_done == true);
1029 	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1030 
1031 	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
1032 	 * split, so test that.
1033 	 */
1034 	bdev->optimal_io_boundary = 15;
1035 	g_io_done = false;
1036 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1037 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1038 
1039 	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1040 	CU_ASSERT(rc == 0);
1041 	CU_ASSERT(g_io_done == false);
1042 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1043 	stub_complete_io(1);
1044 	CU_ASSERT(g_io_done == true);
1045 
1046 	/* Test an UNMAP.  This should also not be split. */
1047 	bdev->optimal_io_boundary = 16;
1048 	g_io_done = false;
1049 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1050 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1051 
1052 	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1053 	CU_ASSERT(rc == 0);
1054 	CU_ASSERT(g_io_done == false);
1055 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1056 	stub_complete_io(1);
1057 	CU_ASSERT(g_io_done == true);
1058 
1059 	/* Test a FLUSH.  This should also not be split. */
1060 	bdev->optimal_io_boundary = 16;
1061 	g_io_done = false;
1062 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1063 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1064 
1065 	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1066 	CU_ASSERT(rc == 0);
1067 	CU_ASSERT(g_io_done == false);
1068 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1069 	stub_complete_io(1);
1070 	CU_ASSERT(g_io_done == true);
1071 
1072 	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1073 
1074 	spdk_put_io_channel(io_ch);
1075 	spdk_bdev_close(desc);
1076 	free_bdev(bdev);
1077 	spdk_bdev_finish(bdev_fini_cb, NULL);
1078 	poll_threads();
1079 }
1080 
/*
 * Verify that I/O splitting interacts correctly with the bdev_io wait queue:
 * with a deliberately tiny spdk_bdev_io pool (size 2), split children cannot
 * all be allocated up front, so they must be queued on the management
 * channel's io_wait_queue and submitted one at a time as bdev_io objects are
 * returned to the pool.
 */
static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	/* Tiny pool/cache force split children onto the wait-queue path. */
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 2,
		.bdev_io_cache_size = 1,
	};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Consume one of the two bdev_ios from the pool with an unrelated read,
	 * so the split below is left with only a single bdev_io to work with.
	 */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O.  This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 *  including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	/* Child 1: blocks 14-15, straddling the end of iov[0] into iov[1]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child 2: blocks 16-31, entirely within iov[1]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child 3: blocks 32-45, tail of iov[1] plus all of iov[2]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Tear down: release the channel and descriptor, then drain the
	 * asynchronous bdev finish path.
	 */
	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1215 
/*
 * Verify the bounce-buffer logic for bdevs that declare a required buffer
 * alignment.  Payloads that already satisfy the requirement must pass
 * through untouched (internal.orig_iovcnt == 0), while misaligned payloads
 * must be redirected through internal.bounce_iov and restored on completion.
 * Note: required_alignment is stored as log2 of the byte alignment.
 */
static void
bdev_io_alignment(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 20,
		.bdev_io_cache_size = 2,
	};
	int rc;
	void *buf;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	/* Use the stub that records the iovs seen at submission time so the
	 * asserts below can inspect alignment via g_bdev_io.
	 */
	fn_table.submit_request = stub_submit_request_aligned_buffer;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 8192);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Pass aligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	/* Pass unaligned single buffer with no alignment required.
	 * NOTE: buf + 4 is arithmetic on a void * — a GCC/Clang extension that
	 * advances in byte units.
	 */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	/* orig_iovcnt == 0 means no bounce buffer was engaged. */
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	/* Pass unaligned single buffer with 512 alignment required */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	/* The original iov was saved and the I/O now points at the bounce iov. */
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	/* Completion must restore the original iovs (bounce state cleared). */
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iov with 2048 alignment required: both source iovs are
	 * saved (orig_iovcnt == iovcnt) and collapsed into the single bounce iov.
	 */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass iov without allocated buffer without alignment required:
	 * the bdev layer allocates the data buffer itself, so no bounce is needed.
	 */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass iov without allocated buffer with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Tear down and release the aligned buffer. */
	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}
1433 
1434 static void
1435 histogram_status_cb(void *cb_arg, int status)
1436 {
1437 	g_status = status;
1438 }
1439 
1440 static void
1441 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
1442 {
1443 	g_status = status;
1444 	g_histogram = histogram;
1445 }
1446 
1447 static void
1448 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
1449 		   uint64_t total, uint64_t so_far)
1450 {
1451 	g_count += count;
1452 }
1453 
/*
 * Verify latency histogram support: enabling, reading back an initially
 * empty histogram, accumulating data across completed I/O, disabling, and
 * the -EFAULT error when querying a bdev whose histogram is disabled.
 */
static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);

	ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(ch != NULL);

	/* Enable histogram */
	g_status = -1;
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);

	/* Allocate histogram */
	histogram = spdk_histogram_data_alloc();
	SPDK_CU_ASSERT_FATAL(histogram != NULL);

	/* Check if histogram is zeroed */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);

	/* No I/O has completed yet, so every bucket must be empty. */
	CU_ASSERT(g_count == 0);

	rc = spdk_bdev_write_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Advance simulated time so the I/O has a non-zero measured latency. */
	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	rc = spdk_bdev_read_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	/* Check if histogram gathered data from all I/O channels */
	g_histogram = NULL;
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	/* One write plus one read completed above. */
	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
	CU_ASSERT(g_count == 2);

	/* Disable histogram */
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == false);

	/* Try to run histogram commands on disabled bdev */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == -EFAULT);

	spdk_histogram_data_free(g_histogram);
	spdk_put_io_channel(ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1541 
1542 int
1543 main(int argc, char **argv)
1544 {
1545 	CU_pSuite		suite = NULL;
1546 	unsigned int		num_failures;
1547 
1548 	if (CU_initialize_registry() != CUE_SUCCESS) {
1549 		return CU_get_error();
1550 	}
1551 
1552 	suite = CU_add_suite("bdev", null_init, null_clean);
1553 	if (suite == NULL) {
1554 		CU_cleanup_registry();
1555 		return CU_get_error();
1556 	}
1557 
1558 	if (
1559 		CU_add_test(suite, "bytes_to_blocks_test", bytes_to_blocks_test) == NULL ||
1560 		CU_add_test(suite, "num_blocks_test", num_blocks_test) == NULL ||
1561 		CU_add_test(suite, "io_valid", io_valid_test) == NULL ||
1562 		CU_add_test(suite, "open_write", open_write_test) == NULL ||
1563 		CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
1564 		CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
1565 		CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
1566 		CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
1567 		CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
1568 		CU_add_test(suite, "bdev_io_split_with_io_wait", bdev_io_split_with_io_wait) == NULL ||
1569 		CU_add_test(suite, "bdev_io_alignment", bdev_io_alignment) == NULL ||
1570 		CU_add_test(suite, "bdev_histograms", bdev_histograms) == NULL
1571 	) {
1572 		CU_cleanup_registry();
1573 		return CU_get_error();
1574 	}
1575 
1576 	allocate_threads(1);
1577 	set_thread(0);
1578 
1579 	CU_basic_set_mode(CU_BRM_VERBOSE);
1580 	CU_basic_run_tests();
1581 	num_failures = CU_get_number_of_failures();
1582 	CU_cleanup_registry();
1583 
1584 	free_threads();
1585 
1586 	return num_failures;
1587 }
1588