xref: /spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c (revision 275f0c341cba262ebf07aa708874456f74518811)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/ut_multithread.c"
37 #include "unit/lib/json_mock.c"
38 
39 #include "spdk/config.h"
40 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
41 #undef SPDK_CONFIG_VTUNE
42 
43 #include "bdev/bdev.c"
44 
/* Stub out conf, trace and notify dependencies pulled in by bdev.c so the
 * test binary links without those subsystems.  Each stub returns the fixed
 * value given as the last macro argument.
 */
DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

/* Referenced by the trace stubs below; never populated in these tests. */
struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
63 
64 
int g_status;					/* Status captured by test callbacks. */
int g_count;					/* Generic counter used by test callbacks. */
struct spdk_histogram_data *g_histogram;	/* Histogram pointer shared with callbacks. */
68 
/* Empty stub for the SCSI/NVMe status translation helper required by bdev.c;
 * none of these tests inspect the translated status codes.
 */
void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}
74 
/* CUnit suite initialization hook; nothing to set up. */
static int
null_init(void)
{
	return 0;
}
80 
/* CUnit suite cleanup hook; nothing to tear down. */
static int
null_clean(void)
{
	return 0;
}
86 
/* Bdev destruct callback for the stub module; test bdevs own no resources. */
static int
stub_destruct(void *ctx)
{
	return 0;
}
92 
/* Description of one I/O the test expects the stub backend to receive.
 * stub_submit_request() pops these in FIFO order and compares them against
 * the actual submitted bdev_io.
 */
struct ut_expected_io {
	uint8_t				type;	/* SPDK_BDEV_IO_TYPE_*; INVALID skips the type check */
	uint64_t			offset;	/* expected offset_blocks */
	uint64_t			length;	/* expected num_blocks; 0 skips offset/length/iov checks */
	int				iovcnt;	/* expected iov count; 0 skips the iov comparison */
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	TAILQ_ENTRY(ut_expected_io)	link;
};
101 
/* Per-channel context for the stub backend: the queue of submitted-but-not-yet
 * completed I/Os and the queue of expectations set up by the tests.
 */
struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};
107 
static bool g_io_done;				/* Set by io_done() when a completion callback fires. */
static struct spdk_bdev_io *g_bdev_io;		/* Last bdev_io seen by stub_submit_request(). */
static enum spdk_bdev_io_status g_io_status;	/* Status recorded by io_done(). */
static uint32_t g_bdev_ut_io_device;		/* Address used as the io_device handle. */
static struct bdev_ut_channel *g_bdev_ut_channel;	/* The single UT channel (created/destroyed per test). */
113 
114 static struct ut_expected_io *
115 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
116 {
117 	struct ut_expected_io *expected_io;
118 
119 	expected_io = calloc(1, sizeof(*expected_io));
120 	SPDK_CU_ASSERT_FATAL(expected_io != NULL);
121 
122 	expected_io->type = type;
123 	expected_io->offset = offset;
124 	expected_io->length = length;
125 	expected_io->iovcnt = iovcnt;
126 
127 	return expected_io;
128 }
129 
130 static void
131 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
132 {
133 	expected_io->iov[pos].iov_base = base;
134 	expected_io->iov[pos].iov_len = len;
135 }
136 
137 static void
138 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
139 {
140 	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
141 	struct ut_expected_io *expected_io;
142 	struct iovec *iov, *expected_iov;
143 	int i;
144 
145 	g_bdev_io = bdev_io;
146 
147 	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
148 	ch->outstanding_io_count++;
149 
150 	expected_io = TAILQ_FIRST(&ch->expected_io);
151 	if (expected_io == NULL) {
152 		return;
153 	}
154 	TAILQ_REMOVE(&ch->expected_io, expected_io, link);
155 
156 	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
157 		CU_ASSERT(bdev_io->type == expected_io->type);
158 	}
159 
160 	if (expected_io->length == 0) {
161 		free(expected_io);
162 		return;
163 	}
164 
165 	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
166 	CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks);
167 
168 	if (expected_io->iovcnt == 0) {
169 		free(expected_io);
170 		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
171 		return;
172 	}
173 
174 	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
175 	for (i = 0; i < expected_io->iovcnt; i++) {
176 		iov = &bdev_io->u.bdev.iovs[i];
177 		expected_iov = &expected_io->iov[i];
178 		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
179 		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
180 	}
181 
182 	free(expected_io);
183 }
184 
/* get_buf callback used by stub_submit_request_aligned_buffer(): once the
 * aligned buffer is attached, forward the I/O to the normal stub path.
 */
static void
stub_submit_request_aligned_buffer_cb(struct spdk_io_channel *_ch,
				      struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}
193 
/* Alternate submit_request that first requests an aligned data buffer of the
 * full I/O size, exercising the spdk_bdev_io_get_buf() path.
 */
static void
stub_submit_request_aligned_buffer(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_aligned_buffer_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
200 
201 static uint32_t
202 stub_complete_io(uint32_t num_to_complete)
203 {
204 	struct bdev_ut_channel *ch = g_bdev_ut_channel;
205 	struct spdk_bdev_io *bdev_io;
206 	uint32_t num_completed = 0;
207 
208 	while (num_completed < num_to_complete) {
209 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
210 			break;
211 		}
212 		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
213 		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
214 		ch->outstanding_io_count--;
215 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
216 		num_completed++;
217 	}
218 
219 	return num_completed;
220 }
221 
/* get_io_channel callback: all test bdevs share the single UT io_device. */
static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}
227 
/* io_type_supported callback: the stub backend claims to support everything. */
static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return true;
}
233 
/* Function table shared by every bdev created in these tests. */
static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};
240 
/* io_device channel create callback: initialize the UT channel's queues and
 * publish it through g_bdev_ut_channel (only one channel may exist at a time).
 */
static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}
254 
/* io_device channel destroy callback: clear the global channel pointer. */
static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}
261 
262 struct spdk_bdev_module bdev_ut_if;
263 
/* Module init: register the UT io_device, then signal async init complete
 * (bdev_ut_if sets .async_init = true).
 */
static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}
272 
/* Module fini: tear down the UT io_device registered in module_init. */
static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}
278 
/* Module used for the "physical" test bdevs created by allocate_bdev(). */
struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};
285 
286 static void vbdev_ut_examine(struct spdk_bdev *bdev);
287 
/* vbdev module init: nothing to do for the virtual test module. */
static int
vbdev_ut_module_init(void)
{
	return 0;
}
293 
/* vbdev module fini: nothing to do for the virtual test module. */
static void
vbdev_ut_module_fini(void)
{
}
298 
/* Module used for the "virtual" test bdevs created by allocate_vbdev(). */
struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};
305 
/* Register both test modules with the bdev layer. */
SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
308 
/* examine_config callback: immediately report examine complete so bdev
 * registration does not stall waiting on this module.
 */
static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}
314 
315 static struct spdk_bdev *
316 allocate_bdev(char *name)
317 {
318 	struct spdk_bdev *bdev;
319 	int rc;
320 
321 	bdev = calloc(1, sizeof(*bdev));
322 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
323 
324 	bdev->name = name;
325 	bdev->fn_table = &fn_table;
326 	bdev->module = &bdev_ut_if;
327 	bdev->blockcnt = 1024;
328 	bdev->blocklen = 512;
329 
330 	rc = spdk_bdev_register(bdev);
331 	CU_ASSERT(rc == 0);
332 
333 	return bdev;
334 }
335 
336 static struct spdk_bdev *
337 allocate_vbdev(char *name)
338 {
339 	struct spdk_bdev *bdev;
340 	int rc;
341 
342 	bdev = calloc(1, sizeof(*bdev));
343 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
344 
345 	bdev->name = name;
346 	bdev->fn_table = &fn_table;
347 	bdev->module = &vbdev_ut_if;
348 
349 	rc = spdk_bdev_register(bdev);
350 	CU_ASSERT(rc == 0);
351 
352 	return bdev;
353 }
354 
355 static void
356 free_bdev(struct spdk_bdev *bdev)
357 {
358 	spdk_bdev_unregister(bdev, NULL, NULL);
359 	poll_threads();
360 	memset(bdev, 0xFF, sizeof(*bdev));
361 	free(bdev);
362 }
363 
364 static void
365 free_vbdev(struct spdk_bdev *bdev)
366 {
367 	spdk_bdev_unregister(bdev, NULL, NULL);
368 	poll_threads();
369 	memset(bdev, 0xFF, sizeof(*bdev));
370 	free(bdev);
371 }
372 
/* Completion callback for spdk_bdev_get_device_stat(): verify the bdev,
 * release the stat buffer and the bdev, then flag the test as done via
 * cb_arg (a bool*).  Note this callback owns and frees both 'stat' and
 * 'bdev'.
 */
static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	free_bdev(bdev);

	*(bool *)cb_arg = true;
}
388 
389 static void
390 get_device_stat_test(void)
391 {
392 	struct spdk_bdev *bdev;
393 	struct spdk_bdev_io_stat *stat;
394 	bool done;
395 
396 	bdev = allocate_bdev("bdev0");
397 	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
398 	if (stat == NULL) {
399 		free_bdev(bdev);
400 		return;
401 	}
402 
403 	done = false;
404 	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
405 	while (!done) { poll_threads(); }
406 
407 
408 }
409 
/* Verify spdk_bdev_open() read/write semantics against module claims:
 * read-only opens always succeed, read/write opens fail with -EPERM on any
 * bdev already claimed by a module.
 */
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7). This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |           \  |  /         \
	 *      bdev0   bdev1          bdev2         bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}
539 
/* Verify spdk_bdev_bytes_to_blocks() conversion and rejection of offsets or
 * lengths that are not block-aligned, for both power-of-two and
 * non-power-of-two block sizes.
 */
static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen not the power of two */
	bdev.blocklen = 100;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}
575 
576 static void
577 num_blocks_test(void)
578 {
579 	struct spdk_bdev bdev;
580 	struct spdk_bdev_desc *desc = NULL;
581 	int rc;
582 
583 	memset(&bdev, 0, sizeof(bdev));
584 	bdev.name = "num_blocks";
585 	bdev.fn_table = &fn_table;
586 	bdev.module = &bdev_ut_if;
587 	spdk_bdev_register(&bdev);
588 	spdk_bdev_notify_blockcnt_change(&bdev, 50);
589 
590 	/* Growing block number */
591 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
592 	/* Shrinking block number */
593 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);
594 
595 	/* In case bdev opened */
596 	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
597 	CU_ASSERT(rc == 0);
598 	SPDK_CU_ASSERT_FATAL(desc != NULL);
599 
600 	/* Growing block number */
601 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
602 	/* Shrinking block number */
603 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);
604 
605 	spdk_bdev_close(desc);
606 	spdk_bdev_unregister(&bdev, NULL, NULL);
607 
608 	poll_threads();
609 }
610 
/* Verify spdk_bdev_io_valid_blocks() range checks, including the uint64_t
 * wrap-around case at the top of the offset range.
 */
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}
636 
/* Verify alias management: duplicate/empty alias rejection, cross-bdev alias
 * uniqueness, deletion of single aliases and of whole alias lists, and that
 * a bdev's primary name cannot be deleted as an alias.
 */
static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Trying adding an alias identical to name.
	 * Alias is identical to name, so it can not be added to aliases list
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Trying to add empty alias,
	 * this one should fail
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Trying adding same alias to two different registered bdevs */

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying removing an alias from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Trying to del all alias from empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Trying to del all alias from non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	/* Unregistered directly above (not via free_bdev), so just free here. */
	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
724 
/* Generic I/O completion callback: record completion and status in globals,
 * then release the bdev_io back to the pool.
 */
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}
732 
/* spdk_bdev_initialize() completion callback: init must succeed. */
static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}
738 
/* spdk_bdev_finish() completion callback: nothing to verify. */
static void
bdev_fini_cb(void *arg)
{
}
743 
/* Wrapper around spdk_bdev_io_wait_entry carrying the context io_wait_cb()
 * needs to resubmit the I/O and a flag recording that it ran.
 */
struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};
750 
/* io_wait callback: invoked when a bdev_io becomes available again; retry
 * the read that previously failed with -ENOMEM and mark the entry submitted.
 */
static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}
761 
/* Exhaust a deliberately tiny bdev_io pool (size 4), confirm a fifth read
 * fails with -ENOMEM, then verify spdk_bdev_queue_io_wait() resubmits the
 * queued waiters one at a time as completions free bdev_ios.
 */
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Fill the entire bdev_io pool with reads. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	/* Pool is exhausted; the next submission must fail. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	/* Each completion frees one bdev_io and fires exactly one waiter,
	 * so the outstanding count stays at 4.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
839 
840 static void
841 bdev_io_spans_boundary_test(void)
842 {
843 	struct spdk_bdev bdev;
844 	struct spdk_bdev_io bdev_io;
845 
846 	memset(&bdev, 0, sizeof(bdev));
847 
848 	bdev.optimal_io_boundary = 0;
849 	bdev_io.bdev = &bdev;
850 
851 	/* bdev has no optimal_io_boundary set - so this should return false. */
852 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
853 
854 	bdev.optimal_io_boundary = 32;
855 	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;
856 
857 	/* RESETs are not based on LBAs - so this should return false. */
858 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
859 
860 	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
861 	bdev_io.u.bdev.offset_blocks = 0;
862 	bdev_io.u.bdev.num_blocks = 32;
863 
864 	/* This I/O run right up to, but does not cross, the boundary - so this should return false. */
865 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
866 
867 	bdev_io.u.bdev.num_blocks = 33;
868 
869 	/* This I/O spans a boundary. */
870 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == true);
871 }
872 
873 static void
874 bdev_io_split(void)
875 {
876 	struct spdk_bdev *bdev;
877 	struct spdk_bdev_desc *desc = NULL;
878 	struct spdk_io_channel *io_ch;
879 	struct spdk_bdev_opts bdev_opts = {
880 		.bdev_io_pool_size = 512,
881 		.bdev_io_cache_size = 64,
882 	};
883 	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
884 	struct ut_expected_io *expected_io;
885 	uint64_t i;
886 	int rc;
887 
888 	rc = spdk_bdev_set_opts(&bdev_opts);
889 	CU_ASSERT(rc == 0);
890 	spdk_bdev_initialize(bdev_init_cb, NULL);
891 
892 	bdev = allocate_bdev("bdev0");
893 
894 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
895 	CU_ASSERT(rc == 0);
896 	SPDK_CU_ASSERT_FATAL(desc != NULL);
897 	io_ch = spdk_bdev_get_io_channel(desc);
898 	CU_ASSERT(io_ch != NULL);
899 
900 	bdev->optimal_io_boundary = 16;
901 	bdev->split_on_optimal_io_boundary = false;
902 
903 	g_io_done = false;
904 
905 	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
906 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
907 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
908 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
909 
910 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
911 	CU_ASSERT(rc == 0);
912 	CU_ASSERT(g_io_done == false);
913 
914 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
915 	stub_complete_io(1);
916 	CU_ASSERT(g_io_done == true);
917 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
918 
919 	bdev->split_on_optimal_io_boundary = true;
920 
921 	/* Now test that a single-vector command is split correctly.
922 	 * Offset 14, length 8, payload 0xF000
923 	 *  Child - Offset 14, length 2, payload 0xF000
924 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
925 	 *
926 	 * Set up the expected values before calling spdk_bdev_read_blocks
927 	 */
928 	g_io_done = false;
929 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
930 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
931 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
932 
933 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
934 	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
935 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
936 
937 	/* spdk_bdev_read_blocks will submit the first child immediately. */
938 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
939 	CU_ASSERT(rc == 0);
940 	CU_ASSERT(g_io_done == false);
941 
942 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
943 	stub_complete_io(2);
944 	CU_ASSERT(g_io_done == true);
945 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
946 
947 	/* Now set up a more complex, multi-vector command that needs to be split,
948 	 *  including splitting iovecs.
949 	 */
950 	iov[0].iov_base = (void *)0x10000;
951 	iov[0].iov_len = 512;
952 	iov[1].iov_base = (void *)0x20000;
953 	iov[1].iov_len = 20 * 512;
954 	iov[2].iov_base = (void *)0x30000;
955 	iov[2].iov_len = 11 * 512;
956 
957 	g_io_done = false;
958 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
959 	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
960 	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
961 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
962 
963 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
964 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
965 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
966 
967 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
968 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
969 	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
970 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
971 
972 	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
973 	CU_ASSERT(rc == 0);
974 	CU_ASSERT(g_io_done == false);
975 
976 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
977 	stub_complete_io(3);
978 	CU_ASSERT(g_io_done == true);
979 
980 	/* Test multi vector command that needs to be split by strip and then needs to be
981 	 * split further due to the capacity of child iovs.
982 	 */
983 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
984 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
985 		iov[i].iov_len = 512;
986 	}
987 
988 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
989 	g_io_done = false;
990 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
991 					   BDEV_IO_NUM_CHILD_IOV);
992 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
993 		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
994 	}
995 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
996 
997 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
998 					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
999 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1000 		ut_expected_io_set_iov(expected_io, i,
1001 				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
1002 	}
1003 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1004 
1005 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1006 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1007 	CU_ASSERT(rc == 0);
1008 	CU_ASSERT(g_io_done == false);
1009 
1010 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1011 	stub_complete_io(1);
1012 	CU_ASSERT(g_io_done == false);
1013 
1014 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1015 	stub_complete_io(1);
1016 	CU_ASSERT(g_io_done == true);
1017 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1018 
1019 	/* Test multi vector command that needs to be split by strip and then needs to be
1020 	 * split further due to the capacity of child iovs. In this case, the length of
1021 	 * the rest of iovec array with an I/O boundary is the multiple of block size.
1022 	 */
1023 
1024 	/* Fill iovec array for exactly one boundary. The iovec cnt for this boundary
1025 	 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
1026 	 */
1027 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1028 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1029 		iov[i].iov_len = 512;
1030 	}
1031 	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1032 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1033 		iov[i].iov_len = 256;
1034 	}
1035 	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1036 	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;
1037 
1038 	/* Add an extra iovec to trigger split */
1039 	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1040 	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1041 
1042 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
1043 	g_io_done = false;
1044 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
1045 					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
1046 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1047 		ut_expected_io_set_iov(expected_io, i,
1048 				       (void *)((i + 1) * 0x10000), 512);
1049 	}
1050 	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1051 		ut_expected_io_set_iov(expected_io, i,
1052 				       (void *)((i + 1) * 0x10000), 256);
1053 	}
1054 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1055 
1056 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
1057 					   1, 1);
1058 	ut_expected_io_set_iov(expected_io, 0,
1059 			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
1060 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1061 
1062 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
1063 					   1, 1);
1064 	ut_expected_io_set_iov(expected_io, 0,
1065 			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
1066 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1067 
1068 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
1069 				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1070 	CU_ASSERT(rc == 0);
1071 	CU_ASSERT(g_io_done == false);
1072 
1073 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1074 	stub_complete_io(1);
1075 	CU_ASSERT(g_io_done == false);
1076 
1077 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1078 	stub_complete_io(2);
1079 	CU_ASSERT(g_io_done == true);
1080 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1081 
1082 	/* Test multi vector command that needs to be split by strip and then needs to be
1083 	 * split further due to the capacity of child iovs, but fails to split. The cause
1084 	 * of failure of split is that the length of an iovec is not multiple of block size.
1085 	 */
1086 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1087 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1088 		iov[i].iov_len = 512;
1089 	}
1090 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
1091 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1092 
1093 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
1094 	g_io_done = false;
1095 	g_io_status = 0;
1096 
1097 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1098 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1099 	CU_ASSERT(rc == 0);
1100 	CU_ASSERT(g_io_done == true);
1101 	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1102 
1103 	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
1104 	 * split, so test that.
1105 	 */
1106 	bdev->optimal_io_boundary = 15;
1107 	g_io_done = false;
1108 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1109 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1110 
1111 	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1112 	CU_ASSERT(rc == 0);
1113 	CU_ASSERT(g_io_done == false);
1114 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1115 	stub_complete_io(1);
1116 	CU_ASSERT(g_io_done == true);
1117 
1118 	/* Test an UNMAP.  This should also not be split. */
1119 	bdev->optimal_io_boundary = 16;
1120 	g_io_done = false;
1121 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1122 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1123 
1124 	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1125 	CU_ASSERT(rc == 0);
1126 	CU_ASSERT(g_io_done == false);
1127 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1128 	stub_complete_io(1);
1129 	CU_ASSERT(g_io_done == true);
1130 
1131 	/* Test a FLUSH.  This should also not be split. */
1132 	bdev->optimal_io_boundary = 16;
1133 	g_io_done = false;
1134 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1135 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1136 
1137 	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1138 	CU_ASSERT(rc == 0);
1139 	CU_ASSERT(g_io_done == false);
1140 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1141 	stub_complete_io(1);
1142 	CU_ASSERT(g_io_done == true);
1143 
1144 	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1145 
1146 	spdk_put_io_channel(io_ch);
1147 	spdk_bdev_close(desc);
1148 	free_bdev(bdev);
1149 	spdk_bdev_finish(bdev_fini_cb, NULL);
1150 	poll_threads();
1151 }
1152 
/*
 * Verify I/O splitting when the spdk_bdev_io pool is nearly exhausted.
 * bdev_io_pool_size is set to 2, so with one I/O already outstanding a
 * split parent I/O must queue on the mgmt channel's io_wait_queue and
 * then submit its children one at a time as bdev_ios return to the pool.
 */
static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	/* Tiny pool (2 bdev_ios, per-thread cache of 1) to force io_wait behavior. */
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 2,
		.bdev_io_cache_size = 1,
	};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Submit one I/O up front to consume a bdev_io from the small pool. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O.  This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 *  including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	/* Expected children: 14..15 (boundary at 16), 16..31, 32..45. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1287 
/*
 * Verify buffer alignment handling in the bdev layer: when a payload does
 * not satisfy the bdev's required_alignment, the bdev layer must substitute
 * an internal bounce buffer (internal.bounce_iov) and restore the original
 * iovs on completion (orig_iovcnt back to 0); payloads that already satisfy
 * the requirement must be passed through untouched.
 */
static void
bdev_io_alignment(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 20,
		.bdev_io_cache_size = 2,
	};
	int rc;
	void *buf;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	/* Use the stub that records iov state so alignment can be checked below. */
	fn_table.submit_request = stub_submit_request_aligned_buffer;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 8192);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Pass aligned single buffer with no alignment required */
	alignment = 1;
	/* required_alignment is stored as log2 of the alignment in bytes. */
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	/* Pass unaligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	/* No bounce buffer expected (orig_iovcnt == 0); payload passes through. */
	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	/* Pass unaligned single buffer with 512 alignment required */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);

	/* Bounce buffer expected: iovs replaced with internal.bounce_iov and
	 * the original iov restored (orig_iovcnt back to 0) on completion.
	 */
	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iov with 2048 alignment required */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	/* Both original iovs are stashed (orig_iovcnt == iovcnt) while the
	 * bounce buffer is in use.
	 */
	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass iov without allocated buffer without alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	/* NULL payload: the bdev layer allocates the buffer itself, so the
	 * result must already satisfy the alignment with no bounce iov.
	 */
	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass iov without allocated buffer with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}
1505 
/* Completion callback for spdk_bdev_histogram_enable(): records the
 * operation status in the global g_status for the test to check.
 */
static void
histogram_status_cb(void *cb_arg, int status)
{
	g_status = status;
}
1511 
/* Completion callback for spdk_bdev_histogram_get(): saves both the
 * status and the returned histogram pointer in globals for inspection.
 */
static void
histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
{
	g_status = status;
	g_histogram = histogram;
}
1518 
/* Per-bucket callback for spdk_histogram_data_iterate(): accumulates the
 * bucket's datapoint count into g_count to obtain the histogram total.
 */
static void
histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
		   uint64_t total, uint64_t so_far)
{
	g_count += count;
}
1525 
1526 static void
1527 bdev_histograms(void)
1528 {
1529 	struct spdk_bdev *bdev;
1530 	struct spdk_bdev_desc *desc;
1531 	struct spdk_io_channel *ch;
1532 	struct spdk_histogram_data *histogram;
1533 	uint8_t buf[4096];
1534 	int rc;
1535 
1536 	spdk_bdev_initialize(bdev_init_cb, NULL);
1537 
1538 	bdev = allocate_bdev("bdev");
1539 
1540 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
1541 	CU_ASSERT(rc == 0);
1542 	CU_ASSERT(desc != NULL);
1543 
1544 	ch = spdk_bdev_get_io_channel(desc);
1545 	CU_ASSERT(ch != NULL);
1546 
1547 	/* Enable histogram */
1548 	g_status = -1;
1549 	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
1550 	poll_threads();
1551 	CU_ASSERT(g_status == 0);
1552 	CU_ASSERT(bdev->internal.histogram_enabled == true);
1553 
1554 	/* Allocate histogram */
1555 	histogram = spdk_histogram_data_alloc();
1556 	SPDK_CU_ASSERT_FATAL(histogram != NULL);
1557 
1558 	/* Check if histogram is zeroed */
1559 	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
1560 	poll_threads();
1561 	CU_ASSERT(g_status == 0);
1562 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1563 
1564 	g_count = 0;
1565 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1566 
1567 	CU_ASSERT(g_count == 0);
1568 
1569 	rc = spdk_bdev_write_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
1570 	CU_ASSERT(rc == 0);
1571 
1572 	spdk_delay_us(10);
1573 	stub_complete_io(1);
1574 	poll_threads();
1575 
1576 	rc = spdk_bdev_read_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
1577 	CU_ASSERT(rc == 0);
1578 
1579 	spdk_delay_us(10);
1580 	stub_complete_io(1);
1581 	poll_threads();
1582 
1583 	/* Check if histogram gathered data from all I/O channels */
1584 	g_histogram = NULL;
1585 	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
1586 	poll_threads();
1587 	CU_ASSERT(g_status == 0);
1588 	CU_ASSERT(bdev->internal.histogram_enabled == true);
1589 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1590 
1591 	g_count = 0;
1592 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1593 	CU_ASSERT(g_count == 2);
1594 
1595 	/* Disable histogram */
1596 	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
1597 	poll_threads();
1598 	CU_ASSERT(g_status == 0);
1599 	CU_ASSERT(bdev->internal.histogram_enabled == false);
1600 
1601 	/* Try to run histogram commands on disabled bdev */
1602 	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
1603 	poll_threads();
1604 	CU_ASSERT(g_status == -EFAULT);
1605 
1606 	spdk_histogram_data_free(g_histogram);
1607 	spdk_put_io_channel(ch);
1608 	spdk_bdev_close(desc);
1609 	free_bdev(bdev);
1610 	spdk_bdev_finish(bdev_fini_cb, NULL);
1611 	poll_threads();
1612 }
1613 
1614 int
1615 main(int argc, char **argv)
1616 {
1617 	CU_pSuite		suite = NULL;
1618 	unsigned int		num_failures;
1619 
1620 	if (CU_initialize_registry() != CUE_SUCCESS) {
1621 		return CU_get_error();
1622 	}
1623 
1624 	suite = CU_add_suite("bdev", null_init, null_clean);
1625 	if (suite == NULL) {
1626 		CU_cleanup_registry();
1627 		return CU_get_error();
1628 	}
1629 
1630 	if (
1631 		CU_add_test(suite, "bytes_to_blocks_test", bytes_to_blocks_test) == NULL ||
1632 		CU_add_test(suite, "num_blocks_test", num_blocks_test) == NULL ||
1633 		CU_add_test(suite, "io_valid", io_valid_test) == NULL ||
1634 		CU_add_test(suite, "open_write", open_write_test) == NULL ||
1635 		CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
1636 		CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
1637 		CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
1638 		CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
1639 		CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
1640 		CU_add_test(suite, "bdev_io_split_with_io_wait", bdev_io_split_with_io_wait) == NULL ||
1641 		CU_add_test(suite, "bdev_io_alignment", bdev_io_alignment) == NULL ||
1642 		CU_add_test(suite, "bdev_histograms", bdev_histograms) == NULL
1643 	) {
1644 		CU_cleanup_registry();
1645 		return CU_get_error();
1646 	}
1647 
1648 	allocate_threads(1);
1649 	set_thread(0);
1650 
1651 	CU_basic_set_mode(CU_BRM_VERBOSE);
1652 	CU_basic_run_tests();
1653 	num_failures = CU_get_number_of_failures();
1654 	CU_cleanup_registry();
1655 
1656 	free_threads();
1657 
1658 	return num_failures;
1659 }
1660