xref: /spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c (revision fdd365ed799cd634b898979bd1504f4c9cf3ced4)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/ut_multithread.c"
37 #include "unit/lib/json_mock.c"
38 
39 #include "spdk/config.h"
40 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
41 #undef SPDK_CONFIG_VTUNE
42 
43 #include "bdev/bdev.c"
44 
/* Stub out the conf, trace and notify APIs referenced by bdev.c so this
 * unit test does not have to link the real libraries. */
DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

/* Referenced by the trace stubs below; never populated in these tests. */
struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
63 
64 
/* Globals shared between tests and their callbacks.  NOTE(review): g_status,
 * g_count and g_histogram are not used in the visible portion of this file -
 * presumably consumed by tests further down; verify before removing. */
int g_status;
int g_count;
struct spdk_histogram_data *g_histogram;

/* No-op stand-in for the SCSI/NVMe sense translation helper referenced by
 * bdev.c; these tests never exercise the translated error path. */
void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}
74 
/* CUnit suite initialization hook: nothing to set up. */
static int
null_init(void)
{
	return 0;
}
80 
/* CUnit suite cleanup hook: nothing to tear down. */
static int
null_clean(void)
{
	return 0;
}
86 
/* fn_table destruct callback: test bdevs own no per-bdev resources. */
static int
stub_destruct(void *ctx)
{
	return 0;
}
92 
/* Description of an I/O a test expects the stub bdev to receive; queued on
 * bdev_ut_channel.expected_io and checked/freed by stub_submit_request(). */
struct ut_expected_io {
	uint8_t				type;	/* SPDK_BDEV_IO_TYPE_*; INVALID skips the type check */
	uint64_t			offset;	/* expected offset_blocks */
	uint64_t			length;	/* expected num_blocks; 0 skips offset/length/iov checks */
	int				iovcnt;	/* expected iov count; 0 skips iov checks */
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	TAILQ_ENTRY(ut_expected_io)	link;
};
101 
/* Per-channel context for the stub bdev: I/Os submitted but not yet
 * completed, plus the queue of expectations primed by the test. */
struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};
107 
static bool g_io_done;				/* set by io_done() on completion */
static struct spdk_bdev_io *g_bdev_io;		/* most recently submitted I/O */
static enum spdk_bdev_io_status g_io_status;	/* status observed by io_done() */
/* Status stub_complete_io() completes I/Os with; tests override to inject errors. */
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;		/* io_device registration key (address used) */
static struct bdev_ut_channel *g_bdev_ut_channel;	/* the single live test channel */
114 
115 static struct ut_expected_io *
116 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
117 {
118 	struct ut_expected_io *expected_io;
119 
120 	expected_io = calloc(1, sizeof(*expected_io));
121 	SPDK_CU_ASSERT_FATAL(expected_io != NULL);
122 
123 	expected_io->type = type;
124 	expected_io->offset = offset;
125 	expected_io->length = length;
126 	expected_io->iovcnt = iovcnt;
127 
128 	return expected_io;
129 }
130 
131 static void
132 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
133 {
134 	expected_io->iov[pos].iov_base = base;
135 	expected_io->iov[pos].iov_len = len;
136 }
137 
138 static void
139 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
140 {
141 	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
142 	struct ut_expected_io *expected_io;
143 	struct iovec *iov, *expected_iov;
144 	int i;
145 
146 	g_bdev_io = bdev_io;
147 
148 	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
149 	ch->outstanding_io_count++;
150 
151 	expected_io = TAILQ_FIRST(&ch->expected_io);
152 	if (expected_io == NULL) {
153 		return;
154 	}
155 	TAILQ_REMOVE(&ch->expected_io, expected_io, link);
156 
157 	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
158 		CU_ASSERT(bdev_io->type == expected_io->type);
159 	}
160 
161 	if (expected_io->length == 0) {
162 		free(expected_io);
163 		return;
164 	}
165 
166 	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
167 	CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks);
168 
169 	if (expected_io->iovcnt == 0) {
170 		free(expected_io);
171 		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
172 		return;
173 	}
174 
175 	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
176 	for (i = 0; i < expected_io->iovcnt; i++) {
177 		iov = &bdev_io->u.bdev.iovs[i];
178 		expected_iov = &expected_io->iov[i];
179 		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
180 		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
181 	}
182 
183 	free(expected_io);
184 }
185 
/* get_buf callback: asserts the buffer was obtained successfully, then
 * hands the I/O to the normal stub submit path. */
static void
stub_submit_request_aligned_buffer_cb(struct spdk_io_channel *_ch,
				      struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}
194 
/* Alternate submit callback that first routes the I/O through
 * spdk_bdev_io_get_buf() to exercise the aligned-buffer path. */
static void
stub_submit_request_aligned_buffer(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_aligned_buffer_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
201 
202 static uint32_t
203 stub_complete_io(uint32_t num_to_complete)
204 {
205 	struct bdev_ut_channel *ch = g_bdev_ut_channel;
206 	struct spdk_bdev_io *bdev_io;
207 	static enum spdk_bdev_io_status io_status;
208 	uint32_t num_completed = 0;
209 
210 	while (num_completed < num_to_complete) {
211 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
212 			break;
213 		}
214 		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
215 		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
216 		ch->outstanding_io_count--;
217 		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
218 			    g_io_exp_status;
219 		spdk_bdev_io_complete(bdev_io, io_status);
220 		num_completed++;
221 	}
222 
223 	return num_completed;
224 }
225 
/* fn_table get_io_channel callback: returns the channel registered for the
 * test io_device (created via bdev_ut_create_ch). */
static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}
231 
/* Mockable io_type_supported: defaults to true; tests use MOCK_SET to make
 * specific I/O types unsupported. */
DEFINE_STUB(stub_io_type_supported, static bool, (void *_bdev, enum spdk_bdev_io_type io_type),
	    true);

/* Function table shared by every bdev the tests create. */
static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};
241 
242 static int
243 bdev_ut_create_ch(void *io_device, void *ctx_buf)
244 {
245 	struct bdev_ut_channel *ch = ctx_buf;
246 
247 	CU_ASSERT(g_bdev_ut_channel == NULL);
248 	g_bdev_ut_channel = ch;
249 
250 	TAILQ_INIT(&ch->outstanding_io);
251 	ch->outstanding_io_count = 0;
252 	TAILQ_INIT(&ch->expected_io);
253 	return 0;
254 }
255 
/* io_device channel destructor: clears the global channel pointer set by
 * bdev_ut_create_ch(). */
static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}
262 
263 struct spdk_bdev_module bdev_ut_if;
264 
/* Module init: registers the test io_device and immediately signals async
 * init completion (bdev_ut_if has .async_init = true). */
static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}
273 
/* Module teardown: unregisters the test io_device. */
static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}
279 
/* Base bdev module used by allocate_bdev(); async_init forces the
 * init-done handshake path through spdk_bdev_module_init_done(). */
struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};
286 
287 static void vbdev_ut_examine(struct spdk_bdev *bdev);
288 
/* vbdev module init: nothing to do. */
static int
vbdev_ut_module_init(void)
{
	return 0;
}
294 
/* vbdev module teardown: nothing to do. */
static void
vbdev_ut_module_fini(void)
{
}
299 
/* Virtual bdev module used by allocate_vbdev(); its examine hook completes
 * immediately so registration never stalls. */
struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};
306 
307 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
308 SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
309 
/* examine_config hook: no actual examination - just report done so bdev
 * registration can proceed. */
static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}
315 
316 static struct spdk_bdev *
317 allocate_bdev(char *name)
318 {
319 	struct spdk_bdev *bdev;
320 	int rc;
321 
322 	bdev = calloc(1, sizeof(*bdev));
323 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
324 
325 	bdev->name = name;
326 	bdev->fn_table = &fn_table;
327 	bdev->module = &bdev_ut_if;
328 	bdev->blockcnt = 1024;
329 	bdev->blocklen = 512;
330 
331 	rc = spdk_bdev_register(bdev);
332 	CU_ASSERT(rc == 0);
333 
334 	return bdev;
335 }
336 
337 static struct spdk_bdev *
338 allocate_vbdev(char *name)
339 {
340 	struct spdk_bdev *bdev;
341 	int rc;
342 
343 	bdev = calloc(1, sizeof(*bdev));
344 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
345 
346 	bdev->name = name;
347 	bdev->fn_table = &fn_table;
348 	bdev->module = &vbdev_ut_if;
349 
350 	rc = spdk_bdev_register(bdev);
351 	CU_ASSERT(rc == 0);
352 
353 	return bdev;
354 }
355 
/* Unregister a bdev created by allocate_bdev() and release its memory.
 * The 0xFF poison fill helps catch use-after-free via stale pointers. */
static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}
364 
/*
 * Unregister a bdev created by allocate_vbdev() and release its memory.
 * The teardown is byte-for-byte identical to free_bdev(), so delegate to
 * it; the separate entry point is kept so call sites document intent.
 */
static void
free_vbdev(struct spdk_bdev *bdev)
{
	free_bdev(bdev);
}
373 
/* Completion callback for spdk_bdev_get_device_stat(): validates the bdev
 * identity and result code, releases both the stat buffer and the bdev
 * itself, then signals the waiting test through cb_arg. */
static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	/* The callback owns teardown: the bdev is freed here, not in the test body. */
	free_bdev(bdev);

	*(bool *)cb_arg = true;
}
389 
390 static void
391 get_device_stat_test(void)
392 {
393 	struct spdk_bdev *bdev;
394 	struct spdk_bdev_io_stat *stat;
395 	bool done;
396 
397 	bdev = allocate_bdev("bdev0");
398 	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
399 	if (stat == NULL) {
400 		free_bdev(bdev);
401 		return;
402 	}
403 
404 	done = false;
405 	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
406 	while (!done) { poll_threads(); }
407 
408 
409 }
410 
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7). This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |           \  |  /         \
	 *      bdev0   bdev1          bdev2         bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by another module.
	 */
	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by another module.  (Comment fix: this previously said "bdev3".)
	 */
	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by another module.  (Comment fix: this previously said "bdev4".)
	 */
	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed.
	 * (Comment fix: this previously said "bdev4".) */
	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}
540 
/* Verify spdk_bdev_bytes_to_blocks(): byte offsets/lengths must be exact
 * multiples of blocklen, for both power-of-two and non-power-of-two block
 * sizes. */
static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen not the power of two */
	bdev.blocklen = 100;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}
576 
577 static void
578 num_blocks_test(void)
579 {
580 	struct spdk_bdev bdev;
581 	struct spdk_bdev_desc *desc = NULL;
582 	int rc;
583 
584 	memset(&bdev, 0, sizeof(bdev));
585 	bdev.name = "num_blocks";
586 	bdev.fn_table = &fn_table;
587 	bdev.module = &bdev_ut_if;
588 	spdk_bdev_register(&bdev);
589 	spdk_bdev_notify_blockcnt_change(&bdev, 50);
590 
591 	/* Growing block number */
592 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
593 	/* Shrinking block number */
594 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);
595 
596 	/* In case bdev opened */
597 	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
598 	CU_ASSERT(rc == 0);
599 	SPDK_CU_ASSERT_FATAL(desc != NULL);
600 
601 	/* Growing block number */
602 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
603 	/* Shrinking block number */
604 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);
605 
606 	spdk_bdev_close(desc);
607 	spdk_bdev_unregister(&bdev, NULL, NULL);
608 
609 	poll_threads();
610 }
611 
/* Verify spdk_bdev_io_valid_blocks() range checking, including the
 * offset+length overflow case near UINT64_MAX. */
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}
637 
/* Exercise the bdev alias API: duplicate/empty adds, cross-bdev alias
 * collisions, individual deletes, and del-all on empty and non-empty
 * alias lists. */
static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Trying adding an alias identical to name.
	 * Alias is identical to name, so it can not be added to aliases list
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Trying to add empty alias,
	 * this one should fail
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Trying adding same alias to two different registered bdevs */

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying removing an alias from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Trying to del all alias from empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Trying to del all alias from non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
725 
/* Generic I/O completion callback: records completion and final status in
 * globals for the test to inspect, then releases the bdev_io. */
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}
733 
/* spdk_bdev_initialize() completion callback: init must succeed. */
static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}
739 
/* spdk_bdev_finish() completion callback: nothing to check. */
static void
bdev_fini_cb(void *arg)
{
}
744 
/* Context for a queued io_wait retry: the embedded SPDK wait entry plus
 * everything io_wait_cb() needs to resubmit the read. */
struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;	/* channel to retry on */
	struct spdk_bdev_desc		*desc;	/* descriptor to retry on */
	bool				submitted;	/* set once the retry was issued */
};
751 
/* io_wait callback: invoked when a bdev_io becomes available again;
 * resubmits the read (expected to succeed now) and marks the entry. */
static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}
762 
/* Verify that submitting an I/O type the bdev reports as unsupported
 * (via io_type_supported) is rejected with -ENOTSUP. */
static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	MOCK_SET(stub_io_type_supported, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	MOCK_SET(stub_io_type_supported, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
801 
/* Exhaust the (deliberately tiny) bdev_io pool, confirm the next submit
 * fails with -ENOMEM, then verify queued io_wait entries fire in FIFO
 * order as completions free bdev_ios. */
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Use up the whole pool of 4 bdev_ios. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	/* Pool is empty - the fifth submit must fail. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	/* Each completion frees one bdev_io, triggering exactly one wait entry. */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
879 
880 static void
881 bdev_io_spans_boundary_test(void)
882 {
883 	struct spdk_bdev bdev;
884 	struct spdk_bdev_io bdev_io;
885 
886 	memset(&bdev, 0, sizeof(bdev));
887 
888 	bdev.optimal_io_boundary = 0;
889 	bdev_io.bdev = &bdev;
890 
891 	/* bdev has no optimal_io_boundary set - so this should return false. */
892 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
893 
894 	bdev.optimal_io_boundary = 32;
895 	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;
896 
897 	/* RESETs are not based on LBAs - so this should return false. */
898 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
899 
900 	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
901 	bdev_io.u.bdev.offset_blocks = 0;
902 	bdev_io.u.bdev.num_blocks = 32;
903 
904 	/* This I/O run right up to, but does not cross, the boundary - so this should return false. */
905 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
906 
907 	bdev_io.u.bdev.num_blocks = 33;
908 
909 	/* This I/O spans a boundary. */
910 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == true);
911 }
912 
913 static void
914 bdev_io_split(void)
915 {
916 	struct spdk_bdev *bdev;
917 	struct spdk_bdev_desc *desc = NULL;
918 	struct spdk_io_channel *io_ch;
919 	struct spdk_bdev_opts bdev_opts = {
920 		.bdev_io_pool_size = 512,
921 		.bdev_io_cache_size = 64,
922 	};
923 	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
924 	struct ut_expected_io *expected_io;
925 	uint64_t i;
926 	int rc;
927 
928 	rc = spdk_bdev_set_opts(&bdev_opts);
929 	CU_ASSERT(rc == 0);
930 	spdk_bdev_initialize(bdev_init_cb, NULL);
931 
932 	bdev = allocate_bdev("bdev0");
933 
934 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
935 	CU_ASSERT(rc == 0);
936 	SPDK_CU_ASSERT_FATAL(desc != NULL);
937 	io_ch = spdk_bdev_get_io_channel(desc);
938 	CU_ASSERT(io_ch != NULL);
939 
940 	bdev->optimal_io_boundary = 16;
941 	bdev->split_on_optimal_io_boundary = false;
942 
943 	g_io_done = false;
944 
945 	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
946 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
947 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
948 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
949 
950 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
951 	CU_ASSERT(rc == 0);
952 	CU_ASSERT(g_io_done == false);
953 
954 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
955 	stub_complete_io(1);
956 	CU_ASSERT(g_io_done == true);
957 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
958 
959 	bdev->split_on_optimal_io_boundary = true;
960 
961 	/* Now test that a single-vector command is split correctly.
962 	 * Offset 14, length 8, payload 0xF000
963 	 *  Child - Offset 14, length 2, payload 0xF000
964 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
965 	 *
966 	 * Set up the expected values before calling spdk_bdev_read_blocks
967 	 */
968 	g_io_done = false;
969 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
970 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
971 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
972 
973 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
974 	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
975 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
976 
977 	/* spdk_bdev_read_blocks will submit the first child immediately. */
978 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
979 	CU_ASSERT(rc == 0);
980 	CU_ASSERT(g_io_done == false);
981 
982 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
983 	stub_complete_io(2);
984 	CU_ASSERT(g_io_done == true);
985 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
986 
987 	/* Now set up a more complex, multi-vector command that needs to be split,
988 	 *  including splitting iovecs.
989 	 */
990 	iov[0].iov_base = (void *)0x10000;
991 	iov[0].iov_len = 512;
992 	iov[1].iov_base = (void *)0x20000;
993 	iov[1].iov_len = 20 * 512;
994 	iov[2].iov_base = (void *)0x30000;
995 	iov[2].iov_len = 11 * 512;
996 
997 	g_io_done = false;
998 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
999 	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
1000 	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
1001 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1002 
1003 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
1004 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
1005 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1006 
1007 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
1008 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
1009 	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
1010 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1011 
1012 	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
1013 	CU_ASSERT(rc == 0);
1014 	CU_ASSERT(g_io_done == false);
1015 
1016 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
1017 	stub_complete_io(3);
1018 	CU_ASSERT(g_io_done == true);
1019 
1020 	/* Test multi vector command that needs to be split by strip and then needs to be
1021 	 * split further due to the capacity of child iovs.
1022 	 */
1023 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
1024 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1025 		iov[i].iov_len = 512;
1026 	}
1027 
1028 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
1029 	g_io_done = false;
1030 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
1031 					   BDEV_IO_NUM_CHILD_IOV);
1032 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1033 		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
1034 	}
1035 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1036 
1037 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
1038 					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
1039 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1040 		ut_expected_io_set_iov(expected_io, i,
1041 				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
1042 	}
1043 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1044 
1045 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1046 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1047 	CU_ASSERT(rc == 0);
1048 	CU_ASSERT(g_io_done == false);
1049 
1050 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1051 	stub_complete_io(1);
1052 	CU_ASSERT(g_io_done == false);
1053 
1054 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1055 	stub_complete_io(1);
1056 	CU_ASSERT(g_io_done == true);
1057 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1058 
1059 	/* Test multi vector command that needs to be split by strip and then needs to be
1060 	 * split further due to the capacity of child iovs. In this case, the length of
1061 	 * the rest of iovec array with an I/O boundary is the multiple of block size.
1062 	 */
1063 
1064 	/* Fill iovec array for exactly one boundary. The iovec cnt for this boundary
1065 	 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
1066 	 */
1067 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1068 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1069 		iov[i].iov_len = 512;
1070 	}
1071 	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1072 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1073 		iov[i].iov_len = 256;
1074 	}
1075 	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1076 	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;
1077 
1078 	/* Add an extra iovec to trigger split */
1079 	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1080 	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1081 
1082 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
1083 	g_io_done = false;
1084 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
1085 					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
1086 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1087 		ut_expected_io_set_iov(expected_io, i,
1088 				       (void *)((i + 1) * 0x10000), 512);
1089 	}
1090 	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1091 		ut_expected_io_set_iov(expected_io, i,
1092 				       (void *)((i + 1) * 0x10000), 256);
1093 	}
1094 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1095 
1096 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
1097 					   1, 1);
1098 	ut_expected_io_set_iov(expected_io, 0,
1099 			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
1100 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1101 
1102 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
1103 					   1, 1);
1104 	ut_expected_io_set_iov(expected_io, 0,
1105 			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
1106 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1107 
1108 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
1109 				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1110 	CU_ASSERT(rc == 0);
1111 	CU_ASSERT(g_io_done == false);
1112 
1113 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1114 	stub_complete_io(1);
1115 	CU_ASSERT(g_io_done == false);
1116 
1117 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1118 	stub_complete_io(2);
1119 	CU_ASSERT(g_io_done == true);
1120 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1121 
1122 	/* Test multi vector command that needs to be split by strip and then needs to be
1123 	 * split further due to the capacity of child iovs, but fails to split. The cause
1124 	 * of failure of split is that the length of an iovec is not multiple of block size.
1125 	 */
1126 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1127 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1128 		iov[i].iov_len = 512;
1129 	}
1130 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
1131 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1132 
1133 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
1134 	g_io_done = false;
1135 	g_io_status = 0;
1136 
1137 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1138 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1139 	CU_ASSERT(rc == 0);
1140 	CU_ASSERT(g_io_done == true);
1141 	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1142 
1143 	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
1144 	 * split, so test that.
1145 	 */
1146 	bdev->optimal_io_boundary = 15;
1147 	g_io_done = false;
1148 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1149 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1150 
1151 	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1152 	CU_ASSERT(rc == 0);
1153 	CU_ASSERT(g_io_done == false);
1154 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1155 	stub_complete_io(1);
1156 	CU_ASSERT(g_io_done == true);
1157 
1158 	/* Test an UNMAP.  This should also not be split. */
1159 	bdev->optimal_io_boundary = 16;
1160 	g_io_done = false;
1161 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1162 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1163 
1164 	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1165 	CU_ASSERT(rc == 0);
1166 	CU_ASSERT(g_io_done == false);
1167 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1168 	stub_complete_io(1);
1169 	CU_ASSERT(g_io_done == true);
1170 
1171 	/* Test a FLUSH.  This should also not be split. */
1172 	bdev->optimal_io_boundary = 16;
1173 	g_io_done = false;
1174 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1175 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1176 
1177 	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1178 	CU_ASSERT(rc == 0);
1179 	CU_ASSERT(g_io_done == false);
1180 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1181 	stub_complete_io(1);
1182 	CU_ASSERT(g_io_done == true);
1183 
1184 	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1185 
1186 	/* Children requests return an error status */
1187 	bdev->optimal_io_boundary = 16;
1188 	iov[0].iov_base = (void *)0x10000;
1189 	iov[0].iov_len = 512 * 64;
1190 	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1191 	g_io_done = false;
1192 	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1193 
1194 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
1195 	CU_ASSERT(rc == 0);
1196 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1197 	stub_complete_io(4);
1198 	CU_ASSERT(g_io_done == false);
1199 	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1200 	stub_complete_io(1);
1201 	CU_ASSERT(g_io_done == true);
1202 	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1203 
1204 	spdk_put_io_channel(io_ch);
1205 	spdk_bdev_close(desc);
1206 	free_bdev(bdev);
1207 	spdk_bdev_finish(bdev_fini_cb, NULL);
1208 	poll_threads();
1209 }
1210 
/*
 * Verify that when an I/O is split on the optimal I/O boundary but the
 * spdk_bdev_io pool is too small to hold all children at once, the child
 * I/Os are submitted sequentially: a child that cannot allocate an
 * spdk_bdev_io waits on the mgmt channel's io_wait queue and is resubmitted
 * as earlier I/Os complete and return their spdk_bdev_io to the pool.
 */
static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	/* Deliberately tiny pool (2 I/Os, cache of 1) so splitting exhausts
	 * the spdk_bdev_io supply and exercises the io_wait path.
	 */
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 2,
		.bdev_io_cache_size = 1,
	};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Hold one spdk_bdev_io from the pool of 2 so the split below will
	 * run out of spdk_bdev_io objects for its children.
	 */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O.  This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 *  including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	/* Child 1: blocks 14-15, taking the tail of iov[0] plus the first block
	 * of iov[1].
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child 2: blocks 16-31, entirely within iov[1]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child 3: blocks 32-45, the remainder of iov[1] plus all of iov[2]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1345 
/*
 * Verify buffer-alignment handling in the bdev layer.  When a payload does
 * not satisfy bdev->required_alignment, the bdev layer must substitute an
 * aligned bounce buffer (internal.bounce_iov, with the originals saved via
 * internal.orig_iovcnt) and restore the original iovs on completion; payloads
 * that already satisfy the requirement must pass through untouched.
 */
static void
bdev_io_alignment(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 20,
		.bdev_io_cache_size = 2,
	};
	int rc;
	void *buf;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	/* Use the stub that records the submitted iovs so alignment can be
	 * checked after completion.
	 */
	fn_table.submit_request = stub_submit_request_aligned_buffer;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 8192);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Pass aligned single buffer with no alignment required */
	alignment = 1;
	/* required_alignment is stored as log2 of the byte alignment */
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	/* Pass unaligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	/* No bounce buffer expected: orig_iovcnt stays 0 and the caller's
	 * pointer is submitted as-is.
	 */
	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	/* Pass unaligned single buffer with 512 alignment required */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);

	/* Bounce buffer expected: the original iov is saved and the submitted
	 * iovs point at internal.bounce_iov, which must satisfy the alignment.
	 */
	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	/* Completion must restore/clear the saved original iovs. */
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iov with 2048 alignment required */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	/* Both original iovs are saved (orig_iovcnt == iovcnt) and replaced by
	 * a single aligned bounce iov.
	 */
	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass iov without allocated buffer without alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	/* NULL payload: the bdev layer allocates the buffer itself, so no
	 * bounce copy is recorded and the result is always aligned.
	 */
	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass iov without allocated buffer with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}
1563 
1564 static void
1565 histogram_status_cb(void *cb_arg, int status)
1566 {
1567 	g_status = status;
1568 }
1569 
1570 static void
1571 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
1572 {
1573 	g_status = status;
1574 	g_histogram = histogram;
1575 }
1576 
1577 static void
1578 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
1579 		   uint64_t total, uint64_t so_far)
1580 {
1581 	g_count += count;
1582 }
1583 
/*
 * Verify the bdev latency-histogram API: enable, confirm an empty histogram,
 * perform one write and one read with simulated latency, confirm both I/Os
 * were counted, then disable and confirm further gets fail with -EFAULT.
 */
static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);

	ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(ch != NULL);

	/* Enable histogram */
	g_status = -1;
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);

	/* Allocate histogram */
	histogram = spdk_histogram_data_alloc();
	SPDK_CU_ASSERT_FATAL(histogram != NULL);

	/* Check if histogram is zeroed */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);

	/* No I/O submitted yet, so every bucket must be empty. */
	CU_ASSERT(g_count == 0);

	rc = spdk_bdev_write_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Advance the simulated clock before completing so the I/O is
	 * recorded with a nonzero latency.
	 */
	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	rc = spdk_bdev_read_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	/* Check if histogram gathered data from all I/O channels */
	g_histogram = NULL;
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	/* One write + one read were completed above. */
	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
	CU_ASSERT(g_count == 2);

	/* Disable histogram */
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == false);

	/* Try to run histogram commands on disabled bdev */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == -EFAULT);

	spdk_histogram_data_free(g_histogram);
	spdk_put_io_channel(ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1671 
1672 int
1673 main(int argc, char **argv)
1674 {
1675 	CU_pSuite		suite = NULL;
1676 	unsigned int		num_failures;
1677 
1678 	if (CU_initialize_registry() != CUE_SUCCESS) {
1679 		return CU_get_error();
1680 	}
1681 
1682 	suite = CU_add_suite("bdev", null_init, null_clean);
1683 	if (suite == NULL) {
1684 		CU_cleanup_registry();
1685 		return CU_get_error();
1686 	}
1687 
1688 	if (
1689 		CU_add_test(suite, "bytes_to_blocks_test", bytes_to_blocks_test) == NULL ||
1690 		CU_add_test(suite, "num_blocks_test", num_blocks_test) == NULL ||
1691 		CU_add_test(suite, "io_valid", io_valid_test) == NULL ||
1692 		CU_add_test(suite, "open_write", open_write_test) == NULL ||
1693 		CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
1694 		CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
1695 		CU_add_test(suite, "bdev_io_types", bdev_io_types_test) == NULL ||
1696 		CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
1697 		CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
1698 		CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
1699 		CU_add_test(suite, "bdev_io_split_with_io_wait", bdev_io_split_with_io_wait) == NULL ||
1700 		CU_add_test(suite, "bdev_io_alignment", bdev_io_alignment) == NULL ||
1701 		CU_add_test(suite, "bdev_histograms", bdev_histograms) == NULL
1702 	) {
1703 		CU_cleanup_registry();
1704 		return CU_get_error();
1705 	}
1706 
1707 	allocate_threads(1);
1708 	set_thread(0);
1709 
1710 	CU_basic_set_mode(CU_BRM_VERBOSE);
1711 	CU_basic_run_tests();
1712 	num_failures = CU_get_number_of_failures();
1713 	CU_cleanup_registry();
1714 
1715 	free_threads();
1716 
1717 	return num_failures;
1718 }
1719