xref: /spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c (revision d0d19eb82e3ba677162ae5c1930d9ddcf728bcbf)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/ut_multithread.c"
37 #include "unit/lib/json_mock.c"
38 
39 #include "spdk/config.h"
40 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
41 #undef SPDK_CONFIG_VTUNE
42 
43 #include "bdev/bdev.c"
44 
/* Stubs for the config-file API: these tests never load a config, so every
 * lookup reports "not found". */
DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

/* bdev.c references the tracing framework; stub it out so no trace buffers
 * or trace library are needed to link the test. */
struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
/* Notification library stubs - the tests don't inspect notifications. */
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
63 
64 
int g_status;				/* scratch status shared with callbacks */
int g_count;				/* scratch counter shared with callbacks */
struct spdk_histogram_data *g_histogram;	/* scratch histogram pointer for histogram tests */
68 
/* Stub for the SCSI/NVMe status translation helper referenced by bdev.c;
 * these tests never examine the translated sense codes, so it does nothing. */
void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}
74 
/* CUnit suite init hook - nothing to set up. */
static int
null_init(void)
{
	return 0;
}
80 
/* CUnit suite cleanup hook - nothing to tear down. */
static int
null_clean(void)
{
	return 0;
}
86 
/* Destruct callback for the stub bdevs; memory is freed by the tests
 * themselves (see free_bdev()/free_vbdev()), so nothing to do here. */
static int
stub_destruct(void *ctx)
{
	return 0;
}
92 
/* Description of an I/O the test expects the stub backend to receive.
 * Queued on bdev_ut_channel::expected_io and checked (then freed) by
 * stub_submit_request(). */
struct ut_expected_io {
	uint8_t				type;	/* SPDK_BDEV_IO_TYPE_*; INVALID means "don't check type" */
	uint64_t			offset;	/* expected offset_blocks */
	uint64_t			length;	/* expected num_blocks; 0 means "don't check further" */
	int				iovcnt;	/* expected iov count; 0 for iov-less I/O types */
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	TAILQ_ENTRY(ut_expected_io)	link;
};
101 
/* Per-channel context for the stub backend: I/O submitted but not yet
 * completed, plus the queue of expectations set up by the test. */
struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};
107 
static bool g_io_done;			/* set by io_done() when a completion arrives */
static struct spdk_bdev_io *g_bdev_io;	/* most recently submitted I/O */
static enum spdk_bdev_io_status g_io_status;	/* status observed by io_done() */
/* Status that stub_complete_io() completes I/O with; tests override it to
 * inject failures. */
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;	/* address used as the io_device handle */
static struct bdev_ut_channel *g_bdev_ut_channel;	/* the single stub channel */
114 
115 static struct ut_expected_io *
116 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
117 {
118 	struct ut_expected_io *expected_io;
119 
120 	expected_io = calloc(1, sizeof(*expected_io));
121 	SPDK_CU_ASSERT_FATAL(expected_io != NULL);
122 
123 	expected_io->type = type;
124 	expected_io->offset = offset;
125 	expected_io->length = length;
126 	expected_io->iovcnt = iovcnt;
127 
128 	return expected_io;
129 }
130 
131 static void
132 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
133 {
134 	expected_io->iov[pos].iov_base = base;
135 	expected_io->iov[pos].iov_len = len;
136 }
137 
138 static void
139 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
140 {
141 	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
142 	struct ut_expected_io *expected_io;
143 	struct iovec *iov, *expected_iov;
144 	int i;
145 
146 	g_bdev_io = bdev_io;
147 
148 	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
149 	ch->outstanding_io_count++;
150 
151 	expected_io = TAILQ_FIRST(&ch->expected_io);
152 	if (expected_io == NULL) {
153 		return;
154 	}
155 	TAILQ_REMOVE(&ch->expected_io, expected_io, link);
156 
157 	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
158 		CU_ASSERT(bdev_io->type == expected_io->type);
159 	}
160 
161 	if (expected_io->length == 0) {
162 		free(expected_io);
163 		return;
164 	}
165 
166 	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
167 	CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks);
168 
169 	if (expected_io->iovcnt == 0) {
170 		free(expected_io);
171 		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
172 		return;
173 	}
174 
175 	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
176 	for (i = 0; i < expected_io->iovcnt; i++) {
177 		iov = &bdev_io->u.bdev.iovs[i];
178 		expected_iov = &expected_io->iov[i];
179 		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
180 		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
181 	}
182 
183 	free(expected_io);
184 }
185 
/* get_buf callback: once the aligned buffer is ready, forward the I/O to the
 * regular stub submit path. */
static void
stub_submit_request_aligned_buffer_cb(struct spdk_io_channel *_ch,
				      struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}
194 
/* Alternate submit callback used by buffer-alignment tests: requests a
 * bounce/aligned buffer before submitting. */
static void
stub_submit_request_aligned_buffer(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_aligned_buffer_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
201 
202 static uint32_t
203 stub_complete_io(uint32_t num_to_complete)
204 {
205 	struct bdev_ut_channel *ch = g_bdev_ut_channel;
206 	struct spdk_bdev_io *bdev_io;
207 	static enum spdk_bdev_io_status io_status;
208 	uint32_t num_completed = 0;
209 
210 	while (num_completed < num_to_complete) {
211 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
212 			break;
213 		}
214 		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
215 		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
216 		ch->outstanding_io_count--;
217 		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
218 			    g_io_exp_status;
219 		spdk_bdev_io_complete(bdev_io, io_status);
220 		num_completed++;
221 	}
222 
223 	return num_completed;
224 }
225 
/* fn_table get_io_channel callback: hand out the single stub io_device's
 * channel. */
static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}
231 
/* The stub backend claims to support every I/O type. */
static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return true;
}
237 
/* Function table shared by all stub bdevs in this file.
 * NOTE(review): could likely be const - verify spdk_bdev::fn_table accepts a
 * pointer-to-const before changing. */
static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};
244 
245 static int
246 bdev_ut_create_ch(void *io_device, void *ctx_buf)
247 {
248 	struct bdev_ut_channel *ch = ctx_buf;
249 
250 	CU_ASSERT(g_bdev_ut_channel == NULL);
251 	g_bdev_ut_channel = ch;
252 
253 	TAILQ_INIT(&ch->outstanding_io);
254 	ch->outstanding_io_count = 0;
255 	TAILQ_INIT(&ch->expected_io);
256 	return 0;
257 }
258 
/* io_device channel-destroy callback: clear the global so a new channel can
 * be created later. */
static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}
265 
266 struct spdk_bdev_module bdev_ut_if;
267 
/* Module init: register the stub io_device and immediately signal async-init
 * completion (bdev_ut_if sets .async_init = true). */
static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}
276 
/* Module fini: tear down the stub io_device registered in module_init. */
static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}
282 
/* Base-bdev module; async_init exercises the spdk_bdev_module_init_done()
 * path in bdev.c. */
struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};
289 
290 static void vbdev_ut_examine(struct spdk_bdev *bdev);
291 
/* Virtual-bdev module init: nothing to set up. */
static int
vbdev_ut_module_init(void)
{
	return 0;
}
297 
/* Virtual-bdev module fini: nothing to tear down. */
static void
vbdev_ut_module_fini(void)
{
}
302 
/* Virtual-bdev module; provides an examine_config hook so bdev registration
 * can complete (bdev.c waits for all modules to finish examining). */
struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};
309 
310 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
311 SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
312 
/* examine_config hook: claim nothing, just report examination complete. */
static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}
318 
319 static struct spdk_bdev *
320 allocate_bdev(char *name)
321 {
322 	struct spdk_bdev *bdev;
323 	int rc;
324 
325 	bdev = calloc(1, sizeof(*bdev));
326 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
327 
328 	bdev->name = name;
329 	bdev->fn_table = &fn_table;
330 	bdev->module = &bdev_ut_if;
331 	bdev->blockcnt = 1024;
332 	bdev->blocklen = 512;
333 
334 	rc = spdk_bdev_register(bdev);
335 	CU_ASSERT(rc == 0);
336 
337 	return bdev;
338 }
339 
340 static struct spdk_bdev *
341 allocate_vbdev(char *name)
342 {
343 	struct spdk_bdev *bdev;
344 	int rc;
345 
346 	bdev = calloc(1, sizeof(*bdev));
347 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
348 
349 	bdev->name = name;
350 	bdev->fn_table = &fn_table;
351 	bdev->module = &vbdev_ut_if;
352 
353 	rc = spdk_bdev_register(bdev);
354 	CU_ASSERT(rc == 0);
355 
356 	return bdev;
357 }
358 
/* Unregister and free a bdev created by allocate_bdev().  The memory is
 * poisoned with 0xFF before free to make use-after-free bugs visible. */
static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}
367 
/* Unregister and free a bdev created by allocate_vbdev(); same poisoning as
 * free_bdev(). */
static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}
376 
/* Completion callback for spdk_bdev_get_device_stat(): verifies the bdev,
 * frees the stat buffer AND the bdev itself, then flags completion through
 * cb_arg (a bool* owned by the test). */
static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	free_bdev(bdev);

	*(bool *)cb_arg = true;
}
392 
393 static void
394 get_device_stat_test(void)
395 {
396 	struct spdk_bdev *bdev;
397 	struct spdk_bdev_io_stat *stat;
398 	bool done;
399 
400 	bdev = allocate_bdev("bdev0");
401 	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
402 	if (stat == NULL) {
403 		free_bdev(bdev);
404 		return;
405 	}
406 
407 	done = false;
408 	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
409 	while (!done) { poll_threads(); }
410 
411 
412 }
413 
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 and bdev6 are virtual bdevs sharing a single base bdev
	 * (bdev2). This models partitioning or logical volume use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |           \  |  /         \
	 *      bdev0   bdev1          bdev2         bdev3
	 */

	/* Claim bdev0 through bdev5 to simulate them being consumed by the
	 * virtual bdevs in the diagram above.  bdev6-bdev8 stay unclaimed. */
	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.  (The old comment incorrectly said bdev3.)
	 */
	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.  (The old comment incorrectly said bdev4.)
	 */
	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	spdk_bdev_close(desc[5]);

	/* Free top-level vbdevs before the bdevs they are stacked on. */
	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}
543 
/* Verify spdk_bdev_bytes_to_blocks() converts byte offsets/lengths to block
 * units and rejects values that are not block-aligned, for both power-of-two
 * and non-power-of-two block sizes. */
static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen not the power of two */
	bdev.blocklen = 100;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}
579 
580 static void
581 num_blocks_test(void)
582 {
583 	struct spdk_bdev bdev;
584 	struct spdk_bdev_desc *desc = NULL;
585 	int rc;
586 
587 	memset(&bdev, 0, sizeof(bdev));
588 	bdev.name = "num_blocks";
589 	bdev.fn_table = &fn_table;
590 	bdev.module = &bdev_ut_if;
591 	spdk_bdev_register(&bdev);
592 	spdk_bdev_notify_blockcnt_change(&bdev, 50);
593 
594 	/* Growing block number */
595 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
596 	/* Shrinking block number */
597 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);
598 
599 	/* In case bdev opened */
600 	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
601 	CU_ASSERT(rc == 0);
602 	SPDK_CU_ASSERT_FATAL(desc != NULL);
603 
604 	/* Growing block number */
605 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
606 	/* Shrinking block number */
607 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);
608 
609 	spdk_bdev_close(desc);
610 	spdk_bdev_unregister(&bdev, NULL, NULL);
611 
612 	poll_threads();
613 }
614 
/* Verify spdk_bdev_io_valid_blocks() bounds checking, including the
 * offset + length overflow case near UINT64_MAX. */
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	/* Unregistered bdev with no open descriptors - growing always succeeds,
	 * so the return value is not checked here. */
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}
640 
/* Exercise the bdev alias API: duplicate/empty alias rejection, cross-bdev
 * uniqueness, deletion of present/absent aliases, and del_all on empty and
 * non-empty alias lists. */
static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Trying adding an alias identical to name.
	 * Alias is identical to name, so it can not be added to aliases list
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Trying to add empty alias,
	 * this one should fail
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Trying adding same alias to two different registered bdevs */

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying removing an alias from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Trying to del all alias from empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Trying to del all alias from non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
728 
/* Generic I/O completion callback: record the status in globals and free the
 * bdev_io. */
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}
736 
/* Completion callback for spdk_bdev_initialize(). */
static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}
742 
/* Completion callback for spdk_bdev_finish(); nothing to verify. */
static void
bdev_fini_cb(void *arg)
{
}
747 
/* Context for the io_wait test: wraps the SPDK wait entry plus everything
 * io_wait_cb() needs to resubmit, and a flag the test polls. */
struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;	/* set once the retried I/O was accepted */
};
754 
/* io_wait callback: the bdev layer invokes this when a bdev_io becomes
 * available again; retry the read that previously failed with -ENOMEM. */
static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}
765 
/* Exhaust a deliberately tiny bdev_io pool (4 entries), verify further
 * submissions fail with -ENOMEM, then check that queued io_wait entries are
 * serviced in order as completions return bdev_ios to the pool. */
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Use up the entire 4-entry bdev_io pool. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	/* Pool is empty - the fifth submission must fail. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	/* Each completion frees one bdev_io, which services exactly one
	 * waiter; the count stays at 4 because the waiter resubmits. */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
843 
844 static void
845 bdev_io_spans_boundary_test(void)
846 {
847 	struct spdk_bdev bdev;
848 	struct spdk_bdev_io bdev_io;
849 
850 	memset(&bdev, 0, sizeof(bdev));
851 
852 	bdev.optimal_io_boundary = 0;
853 	bdev_io.bdev = &bdev;
854 
855 	/* bdev has no optimal_io_boundary set - so this should return false. */
856 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
857 
858 	bdev.optimal_io_boundary = 32;
859 	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;
860 
861 	/* RESETs are not based on LBAs - so this should return false. */
862 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
863 
864 	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
865 	bdev_io.u.bdev.offset_blocks = 0;
866 	bdev_io.u.bdev.num_blocks = 32;
867 
868 	/* This I/O run right up to, but does not cross, the boundary - so this should return false. */
869 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
870 
871 	bdev_io.u.bdev.num_blocks = 33;
872 
873 	/* This I/O spans a boundary. */
874 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == true);
875 }
876 
877 static void
878 bdev_io_split(void)
879 {
880 	struct spdk_bdev *bdev;
881 	struct spdk_bdev_desc *desc = NULL;
882 	struct spdk_io_channel *io_ch;
883 	struct spdk_bdev_opts bdev_opts = {
884 		.bdev_io_pool_size = 512,
885 		.bdev_io_cache_size = 64,
886 	};
887 	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
888 	struct ut_expected_io *expected_io;
889 	uint64_t i;
890 	int rc;
891 
892 	rc = spdk_bdev_set_opts(&bdev_opts);
893 	CU_ASSERT(rc == 0);
894 	spdk_bdev_initialize(bdev_init_cb, NULL);
895 
896 	bdev = allocate_bdev("bdev0");
897 
898 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
899 	CU_ASSERT(rc == 0);
900 	SPDK_CU_ASSERT_FATAL(desc != NULL);
901 	io_ch = spdk_bdev_get_io_channel(desc);
902 	CU_ASSERT(io_ch != NULL);
903 
904 	bdev->optimal_io_boundary = 16;
905 	bdev->split_on_optimal_io_boundary = false;
906 
907 	g_io_done = false;
908 
909 	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
910 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
911 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
912 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
913 
914 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
915 	CU_ASSERT(rc == 0);
916 	CU_ASSERT(g_io_done == false);
917 
918 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
919 	stub_complete_io(1);
920 	CU_ASSERT(g_io_done == true);
921 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
922 
923 	bdev->split_on_optimal_io_boundary = true;
924 
925 	/* Now test that a single-vector command is split correctly.
926 	 * Offset 14, length 8, payload 0xF000
927 	 *  Child - Offset 14, length 2, payload 0xF000
928 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
929 	 *
930 	 * Set up the expected values before calling spdk_bdev_read_blocks
931 	 */
932 	g_io_done = false;
933 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
934 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
935 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
936 
937 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
938 	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
939 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
940 
941 	/* spdk_bdev_read_blocks will submit the first child immediately. */
942 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
943 	CU_ASSERT(rc == 0);
944 	CU_ASSERT(g_io_done == false);
945 
946 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
947 	stub_complete_io(2);
948 	CU_ASSERT(g_io_done == true);
949 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
950 
951 	/* Now set up a more complex, multi-vector command that needs to be split,
952 	 *  including splitting iovecs.
953 	 */
954 	iov[0].iov_base = (void *)0x10000;
955 	iov[0].iov_len = 512;
956 	iov[1].iov_base = (void *)0x20000;
957 	iov[1].iov_len = 20 * 512;
958 	iov[2].iov_base = (void *)0x30000;
959 	iov[2].iov_len = 11 * 512;
960 
961 	g_io_done = false;
962 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
963 	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
964 	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
965 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
966 
967 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
968 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
969 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
970 
971 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
972 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
973 	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
974 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
975 
976 	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
977 	CU_ASSERT(rc == 0);
978 	CU_ASSERT(g_io_done == false);
979 
980 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
981 	stub_complete_io(3);
982 	CU_ASSERT(g_io_done == true);
983 
984 	/* Test multi vector command that needs to be split by strip and then needs to be
985 	 * split further due to the capacity of child iovs.
986 	 */
987 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
988 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
989 		iov[i].iov_len = 512;
990 	}
991 
992 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
993 	g_io_done = false;
994 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
995 					   BDEV_IO_NUM_CHILD_IOV);
996 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
997 		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
998 	}
999 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1000 
1001 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
1002 					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
1003 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1004 		ut_expected_io_set_iov(expected_io, i,
1005 				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
1006 	}
1007 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1008 
1009 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1010 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1011 	CU_ASSERT(rc == 0);
1012 	CU_ASSERT(g_io_done == false);
1013 
1014 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1015 	stub_complete_io(1);
1016 	CU_ASSERT(g_io_done == false);
1017 
1018 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1019 	stub_complete_io(1);
1020 	CU_ASSERT(g_io_done == true);
1021 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1022 
1023 	/* Test multi vector command that needs to be split by strip and then needs to be
1024 	 * split further due to the capacity of child iovs. In this case, the length of
1025 	 * the rest of iovec array with an I/O boundary is the multiple of block size.
1026 	 */
1027 
1028 	/* Fill iovec array for exactly one boundary. The iovec cnt for this boundary
1029 	 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
1030 	 */
1031 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1032 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1033 		iov[i].iov_len = 512;
1034 	}
1035 	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1036 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1037 		iov[i].iov_len = 256;
1038 	}
1039 	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1040 	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;
1041 
1042 	/* Add an extra iovec to trigger split */
1043 	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1044 	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1045 
1046 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
1047 	g_io_done = false;
1048 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
1049 					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
1050 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1051 		ut_expected_io_set_iov(expected_io, i,
1052 				       (void *)((i + 1) * 0x10000), 512);
1053 	}
1054 	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1055 		ut_expected_io_set_iov(expected_io, i,
1056 				       (void *)((i + 1) * 0x10000), 256);
1057 	}
1058 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1059 
1060 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
1061 					   1, 1);
1062 	ut_expected_io_set_iov(expected_io, 0,
1063 			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
1064 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1065 
1066 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
1067 					   1, 1);
1068 	ut_expected_io_set_iov(expected_io, 0,
1069 			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
1070 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1071 
1072 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
1073 				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1074 	CU_ASSERT(rc == 0);
1075 	CU_ASSERT(g_io_done == false);
1076 
1077 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1078 	stub_complete_io(1);
1079 	CU_ASSERT(g_io_done == false);
1080 
1081 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1082 	stub_complete_io(2);
1083 	CU_ASSERT(g_io_done == true);
1084 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1085 
1086 	/* Test multi vector command that needs to be split by strip and then needs to be
1087 	 * split further due to the capacity of child iovs, but fails to split. The cause
1088 	 * of failure of split is that the length of an iovec is not multiple of block size.
1089 	 */
1090 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1091 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1092 		iov[i].iov_len = 512;
1093 	}
1094 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
1095 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1096 
1097 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
1098 	g_io_done = false;
1099 	g_io_status = 0;
1100 
1101 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1102 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1103 	CU_ASSERT(rc == 0);
1104 	CU_ASSERT(g_io_done == true);
1105 	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1106 
1107 	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
1108 	 * split, so test that.
1109 	 */
1110 	bdev->optimal_io_boundary = 15;
1111 	g_io_done = false;
1112 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1113 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1114 
1115 	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1116 	CU_ASSERT(rc == 0);
1117 	CU_ASSERT(g_io_done == false);
1118 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1119 	stub_complete_io(1);
1120 	CU_ASSERT(g_io_done == true);
1121 
1122 	/* Test an UNMAP.  This should also not be split. */
1123 	bdev->optimal_io_boundary = 16;
1124 	g_io_done = false;
1125 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1126 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1127 
1128 	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1129 	CU_ASSERT(rc == 0);
1130 	CU_ASSERT(g_io_done == false);
1131 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1132 	stub_complete_io(1);
1133 	CU_ASSERT(g_io_done == true);
1134 
1135 	/* Test a FLUSH.  This should also not be split. */
1136 	bdev->optimal_io_boundary = 16;
1137 	g_io_done = false;
1138 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1139 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1140 
1141 	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1142 	CU_ASSERT(rc == 0);
1143 	CU_ASSERT(g_io_done == false);
1144 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1145 	stub_complete_io(1);
1146 	CU_ASSERT(g_io_done == true);
1147 
1148 	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1149 
1150 	/* Children requests return an error status */
1151 	bdev->optimal_io_boundary = 16;
1152 	iov[0].iov_base = (void *)0x10000;
1153 	iov[0].iov_len = 512 * 64;
1154 	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1155 	g_io_done = false;
1156 	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1157 
1158 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
1159 	CU_ASSERT(rc == 0);
1160 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1161 	stub_complete_io(4);
1162 	CU_ASSERT(g_io_done == false);
1163 	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1164 	stub_complete_io(1);
1165 	CU_ASSERT(g_io_done == true);
1166 	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1167 
1168 	spdk_put_io_channel(io_ch);
1169 	spdk_bdev_close(desc);
1170 	free_bdev(bdev);
1171 	spdk_bdev_finish(bdev_fini_cb, NULL);
1172 	poll_threads();
1173 }
1174 
/*
 * Verify I/O splitting when spdk_bdev_io objects are scarce.  The bdev_io
 * pool is limited to two entries (per-thread cache of 1), so a split parent
 * I/O plus one other outstanding I/O exhaust the pool; the child I/Os must
 * then go through the mgmt channel's io_wait queue and are submitted one at
 * a time as their predecessors complete.
 */
static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	/* Deliberately tiny pool: 2 bdev_ios total, cache size 1. */
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 2,
		.bdev_io_cache_size = 1,
	};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Consume one bdev_io from the pool so only one remains for the
	 * split I/O submitted below.
	 */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O.  This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 *  including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	/* Child 1: blocks 14-15 — the tail of the first 16-block boundary region. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child 2: blocks 16-31 — one full boundary-aligned region from iov[1]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child 3: blocks 32-45 — remainder spanning the end of iov[1] and all of iov[2]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1309 
/*
 * Verify buffer-alignment handling.  When a bdev reports a required_alignment
 * (stored as log2) and the caller's buffer or iovs do not satisfy it, the
 * bdev layer must copy the payload into an internal aligned bounce buffer
 * (u.bdev.iovs then points at internal.bounce_iov and orig_iovcnt records the
 * caller's iov count) and restore the original state on completion
 * (orig_iovcnt back to 0).  Aligned or alignment-free submissions must pass
 * the caller's buffers through untouched.  The stub submit function
 * presumably saves each submitted I/O in g_bdev_io for post-completion
 * inspection -- the asserts below rely on that.
 */
static void
bdev_io_alignment(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 20,
		.bdev_io_cache_size = 2,
	};
	int rc;
	void *buf;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	fn_table.submit_request = stub_submit_request_aligned_buffer;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 8192);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Pass aligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	/* Pass unaligned single buffer with no alignment required.
	 * No bounce buffer: orig_iovcnt stays 0 and the caller's pointer is
	 * submitted as-is.  (Note: arithmetic on void * is a GCC extension.)
	 */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	/* Pass unaligned single buffer with 512 alignment required.
	 * A bounce buffer must be used, and released again on completion.
	 */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iov with 2048 alignment required.
	 * Both source iovs are coalesced into a single aligned bounce iov.
	 */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass iov without allocated buffer without alignment required.
	 * The bdev layer allocates the buffer itself, so no bounce is needed.
	 */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass iov without allocated buffer with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}
1527 
1528 static void
1529 histogram_status_cb(void *cb_arg, int status)
1530 {
1531 	g_status = status;
1532 }
1533 
1534 static void
1535 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
1536 {
1537 	g_status = status;
1538 	g_histogram = histogram;
1539 }
1540 
1541 static void
1542 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
1543 		   uint64_t total, uint64_t so_far)
1544 {
1545 	g_count += count;
1546 }
1547 
/*
 * Verify enable/disable and data collection of the per-bdev latency
 * histogram: a freshly enabled histogram is empty, two completed I/Os
 * (each with 10 us of simulated latency via spdk_delay_us) yield a total
 * bucket count of 2, and querying after disabling fails with -EFAULT.
 */
static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);

	ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(ch != NULL);

	/* Enable histogram */
	g_status = -1;
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);

	/* Allocate histogram */
	histogram = spdk_histogram_data_alloc();
	SPDK_CU_ASSERT_FATAL(histogram != NULL);

	/* Check if histogram is zeroed */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);

	CU_ASSERT(g_count == 0);

	rc = spdk_bdev_write_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Advance the mock clock so the completed I/O has nonzero latency. */
	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	rc = spdk_bdev_read_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	/* Check if histogram gathered data from all I/O channels */
	g_histogram = NULL;
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	/* Both completed I/Os (one write, one read) must be counted. */
	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
	CU_ASSERT(g_count == 2);

	/* Disable histogram */
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == false);

	/* Try to run histogram commands on disabled bdev */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == -EFAULT);

	spdk_histogram_data_free(g_histogram);
	spdk_put_io_channel(ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1635 
1636 int
1637 main(int argc, char **argv)
1638 {
1639 	CU_pSuite		suite = NULL;
1640 	unsigned int		num_failures;
1641 
1642 	if (CU_initialize_registry() != CUE_SUCCESS) {
1643 		return CU_get_error();
1644 	}
1645 
1646 	suite = CU_add_suite("bdev", null_init, null_clean);
1647 	if (suite == NULL) {
1648 		CU_cleanup_registry();
1649 		return CU_get_error();
1650 	}
1651 
1652 	if (
1653 		CU_add_test(suite, "bytes_to_blocks_test", bytes_to_blocks_test) == NULL ||
1654 		CU_add_test(suite, "num_blocks_test", num_blocks_test) == NULL ||
1655 		CU_add_test(suite, "io_valid", io_valid_test) == NULL ||
1656 		CU_add_test(suite, "open_write", open_write_test) == NULL ||
1657 		CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
1658 		CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
1659 		CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
1660 		CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
1661 		CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
1662 		CU_add_test(suite, "bdev_io_split_with_io_wait", bdev_io_split_with_io_wait) == NULL ||
1663 		CU_add_test(suite, "bdev_io_alignment", bdev_io_alignment) == NULL ||
1664 		CU_add_test(suite, "bdev_histograms", bdev_histograms) == NULL
1665 	) {
1666 		CU_cleanup_registry();
1667 		return CU_get_error();
1668 	}
1669 
1670 	allocate_threads(1);
1671 	set_thread(0);
1672 
1673 	CU_basic_set_mode(CU_BRM_VERBOSE);
1674 	CU_basic_run_tests();
1675 	num_failures = CU_get_number_of_failures();
1676 	CU_cleanup_registry();
1677 
1678 	free_threads();
1679 
1680 	return num_failures;
1681 }
1682