xref: /spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c (revision ae7b5890ef728af40bd233a5011b924c482603bf)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/ut_multithread.c"
37 #include "unit/lib/json_mock.c"
38 
39 #include "spdk/config.h"
40 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
41 #undef SPDK_CONFIG_VTUNE
42 
43 #include "bdev/bdev.c"
44 
/* Stub out the legacy config-file accessors; bdev.c only needs them to link,
 * and "section not found" results keep it on its default code paths. */
DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

/* Tracing and notification hooks referenced by bdev.c; all no-ops here. */
struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);


/* Scratch globals shared by individual test cases. */
int g_status;
int g_count;
struct spdk_histogram_data *g_histogram;
68 
/* bdev.c calls this SCSI/NVMe status translation helper on some error paths;
 * the tests never inspect the translated codes, so leave the outputs alone. */
void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}
74 
/* CUnit suite-init hook; nothing to set up. */
static int
null_init(void)
{
	return 0;
}
80 
/* CUnit suite-cleanup hook; nothing to tear down. */
static int
null_clean(void)
{
	return 0;
}
86 
/* Destruct callback for the stub bdevs; they own no resources. */
static int
stub_destruct(void *ctx)
{
	return 0;
}
92 
/*
 * Description of an I/O the test expects the stub backend to receive.
 * Queued on bdev_ut_channel.expected_io and matched (then freed) in FIFO
 * order by stub_submit_request().
 */
struct ut_expected_io {
	uint8_t				type;	/* SPDK_BDEV_IO_TYPE_*; INVALID disables the type check */
	uint64_t			offset;	/* expected offset_blocks */
	uint64_t			length;	/* expected num_blocks; 0 skips offset/length/iov checks */
	int				iovcnt;	/* expected iov count; 0 skips iov checks */
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];	/* expected child iovecs */
	void				*md_buf;	/* expected metadata buffer, when non-NULL */
	TAILQ_ENTRY(ut_expected_io)	link;
};
102 
/* Per-channel context of the stub backend: submitted-but-not-completed I/O
 * plus the queue of expectations set up by the test. */
struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};
108 
static bool g_io_done;				/* set by io_done() when a completion fires */
static struct spdk_bdev_io *g_bdev_io;		/* last I/O seen by stub_submit_request() */
static enum spdk_bdev_io_status g_io_status;	/* status captured by io_done() */
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;	/* status stub_complete_io() reports */
static uint32_t g_bdev_ut_io_device;		/* address doubles as the io_device handle */
static struct bdev_ut_channel *g_bdev_ut_channel;	/* single live stub channel */
115 
116 static struct ut_expected_io *
117 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
118 {
119 	struct ut_expected_io *expected_io;
120 
121 	expected_io = calloc(1, sizeof(*expected_io));
122 	SPDK_CU_ASSERT_FATAL(expected_io != NULL);
123 
124 	expected_io->type = type;
125 	expected_io->offset = offset;
126 	expected_io->length = length;
127 	expected_io->iovcnt = iovcnt;
128 
129 	return expected_io;
130 }
131 
132 static void
133 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
134 {
135 	expected_io->iov[pos].iov_base = base;
136 	expected_io->iov[pos].iov_len = len;
137 }
138 
139 static void
140 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
141 {
142 	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
143 	struct ut_expected_io *expected_io;
144 	struct iovec *iov, *expected_iov;
145 	int i;
146 
147 	g_bdev_io = bdev_io;
148 
149 	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
150 	ch->outstanding_io_count++;
151 
152 	expected_io = TAILQ_FIRST(&ch->expected_io);
153 	if (expected_io == NULL) {
154 		return;
155 	}
156 	TAILQ_REMOVE(&ch->expected_io, expected_io, link);
157 
158 	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
159 		CU_ASSERT(bdev_io->type == expected_io->type);
160 	}
161 
162 	if (expected_io->md_buf != NULL) {
163 		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
164 	}
165 
166 	if (expected_io->length == 0) {
167 		free(expected_io);
168 		return;
169 	}
170 
171 	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
172 	CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks);
173 
174 	if (expected_io->iovcnt == 0) {
175 		free(expected_io);
176 		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
177 		return;
178 	}
179 
180 	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
181 	for (i = 0; i < expected_io->iovcnt; i++) {
182 		iov = &bdev_io->u.bdev.iovs[i];
183 		expected_iov = &expected_io->iov[i];
184 		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
185 		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
186 	}
187 
188 	free(expected_io);
189 }
190 
/* get_buf callback used by stub_submit_request_aligned_buffer(): once the
 * buffer is attached, forward the I/O to the normal stub submit path. */
static void
stub_submit_request_aligned_buffer_cb(struct spdk_io_channel *_ch,
				      struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}
199 
/* Alternate submit hook that first routes the I/O through the bdev layer's
 * buffer allocation (spdk_bdev_io_get_buf) before submitting it. */
static void
stub_submit_request_aligned_buffer(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_aligned_buffer_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
206 
207 static uint32_t
208 stub_complete_io(uint32_t num_to_complete)
209 {
210 	struct bdev_ut_channel *ch = g_bdev_ut_channel;
211 	struct spdk_bdev_io *bdev_io;
212 	static enum spdk_bdev_io_status io_status;
213 	uint32_t num_completed = 0;
214 
215 	while (num_completed < num_to_complete) {
216 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
217 			break;
218 		}
219 		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
220 		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
221 		ch->outstanding_io_count--;
222 		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
223 			    g_io_exp_status;
224 		spdk_bdev_io_complete(bdev_io, io_status);
225 		num_completed++;
226 	}
227 
228 	return num_completed;
229 }
230 
/* get_io_channel callback: hand out a channel of the stub io_device. */
static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}
236 
/* Per-I/O-type support matrix consulted by stub_io_type_supported(); tests
 * flip individual entries via ut_enable_io_type(). */
static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
};
249 
/* Enable/disable one I/O type in the stub's support matrix.  Callers are
 * expected to restore the entry when done (the table is shared state). */
static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}
255 
/* io_type_supported callback: answer from the shared support matrix. */
static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}
261 
/* Function table shared by every stub bdev in this file. */
static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};
268 
269 static int
270 bdev_ut_create_ch(void *io_device, void *ctx_buf)
271 {
272 	struct bdev_ut_channel *ch = ctx_buf;
273 
274 	CU_ASSERT(g_bdev_ut_channel == NULL);
275 	g_bdev_ut_channel = ch;
276 
277 	TAILQ_INIT(&ch->outstanding_io);
278 	ch->outstanding_io_count = 0;
279 	TAILQ_INIT(&ch->expected_io);
280 	return 0;
281 }
282 
/* io_channel destroy callback: clear the published channel pointer. */
static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}
289 
290 struct spdk_bdev_module bdev_ut_if;
291 
/* Module init: register the stub io_device and immediately signal async
 * init completion (bdev_ut_if has .async_init = true). */
static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}
300 
/* Module teardown: drop the stub io_device registered at init. */
static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}
306 
/* Base-bdev module: owns the physical stub bdevs and the stub io_device.
 * async_init exercises the spdk_bdev_module_init_done() path. */
struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};
313 
314 static void vbdev_ut_examine(struct spdk_bdev *bdev);
315 
/* vbdev module init: nothing to do for the stub virtual-bdev module. */
static int
vbdev_ut_module_init(void)
{
	return 0;
}
321 
/* vbdev module teardown: nothing to release. */
static void
vbdev_ut_module_fini(void)
{
}
326 
/* Virtual-bdev module: provides an examine_config hook so bdev registration
 * completes through the normal examine flow. */
struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};
333 
334 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
335 SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
336 
/* examine_config hook: no claiming logic needed here, just acknowledge the
 * examine so registration of each new bdev can finish. */
static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}
342 
343 static struct spdk_bdev *
344 allocate_bdev(char *name)
345 {
346 	struct spdk_bdev *bdev;
347 	int rc;
348 
349 	bdev = calloc(1, sizeof(*bdev));
350 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
351 
352 	bdev->name = name;
353 	bdev->fn_table = &fn_table;
354 	bdev->module = &bdev_ut_if;
355 	bdev->blockcnt = 1024;
356 	bdev->blocklen = 512;
357 
358 	rc = spdk_bdev_register(bdev);
359 	CU_ASSERT(rc == 0);
360 
361 	return bdev;
362 }
363 
364 static struct spdk_bdev *
365 allocate_vbdev(char *name)
366 {
367 	struct spdk_bdev *bdev;
368 	int rc;
369 
370 	bdev = calloc(1, sizeof(*bdev));
371 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
372 
373 	bdev->name = name;
374 	bdev->fn_table = &fn_table;
375 	bdev->module = &vbdev_ut_if;
376 
377 	rc = spdk_bdev_register(bdev);
378 	CU_ASSERT(rc == 0);
379 
380 	return bdev;
381 }
382 
/* Unregister a bdev created by allocate_bdev() and release its memory.
 * The 0xFF poison fill helps catch use-after-free of the structure. */
static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}
391 
/* Unregister and free a vbdev.  The teardown sequence is identical to a base
 * bdev's, so delegate to free_bdev() instead of duplicating it. */
static void
free_vbdev(struct spdk_bdev *bdev)
{
	free_bdev(bdev);
}
400 
/* Completion callback for get_device_stat_test(): verifies the bdev identity,
 * then frees both the stat buffer and the bdev itself before signalling the
 * polling loop in the test via *cb_arg. */
static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	/* The bdev is torn down here; the test must not touch it afterwards. */
	free_bdev(bdev);

	*(bool *)cb_arg = true;
}
416 
417 static void
418 get_device_stat_test(void)
419 {
420 	struct spdk_bdev *bdev;
421 	struct spdk_bdev_io_stat *stat;
422 	bool done;
423 
424 	bdev = allocate_bdev("bdev0");
425 	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
426 	if (stat == NULL) {
427 		free_bdev(bdev);
428 		return;
429 	}
430 
431 	done = false;
432 	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
433 	while (!done) { poll_threads(); }
434 
435 
436 }
437 
/* Exercise open-for-write semantics across a tree of claimed/unclaimed bdevs:
 * read-only opens always succeed, read/write opens fail with -EPERM on any
 * bdev a module has claimed, and succeed on unclaimed leaves. */
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7). This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |           \  |  /         \
	 *      bdev0   bdev1          bdev2         bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}
567 
/* Verify byte-to-block conversion, including rejection of offsets/lengths
 * that are not multiples of the block size, for both power-of-two and
 * non-power-of-two block lengths. */
static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen not the power of two */
	bdev.blocklen = 100;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}
603 
604 static void
605 num_blocks_test(void)
606 {
607 	struct spdk_bdev bdev;
608 	struct spdk_bdev_desc *desc = NULL;
609 	int rc;
610 
611 	memset(&bdev, 0, sizeof(bdev));
612 	bdev.name = "num_blocks";
613 	bdev.fn_table = &fn_table;
614 	bdev.module = &bdev_ut_if;
615 	spdk_bdev_register(&bdev);
616 	spdk_bdev_notify_blockcnt_change(&bdev, 50);
617 
618 	/* Growing block number */
619 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
620 	/* Shrinking block number */
621 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);
622 
623 	/* In case bdev opened */
624 	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
625 	CU_ASSERT(rc == 0);
626 	SPDK_CU_ASSERT_FATAL(desc != NULL);
627 
628 	/* Growing block number */
629 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
630 	/* Shrinking block number */
631 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);
632 
633 	spdk_bdev_close(desc);
634 	spdk_bdev_unregister(&bdev, NULL, NULL);
635 
636 	poll_threads();
637 }
638 
/* Verify spdk_bdev_io_valid_blocks() range checking against a 100-block
 * bdev, including the offset+length overflow case near UINT64_MAX. */
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}
664 
/* Verify bdev alias management: rejecting duplicates of names/aliases and
 * NULL input, adding/removing aliases, and bulk deletion of alias lists. */
static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Trying adding an alias identical to name.
	 * Alias is identical to name, so it can not be added to aliases list
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Trying to add empty alias,
	 * this one should fail
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Trying adding same alias to two different registered bdevs */

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying removing an alias from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Trying to del all alias from empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Trying to del all alias from non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
752 
/* Generic I/O completion callback: record status in the test globals and
 * release the I/O back to the pool. */
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}
760 
/* spdk_bdev_initialize() completion callback; init must succeed. */
static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}
766 
/* spdk_bdev_finish() completion callback; nothing to verify. */
static void
bdev_fini_cb(void *arg)
{
}
771 
/* Wrapper around spdk_bdev_io_wait_entry carrying the context io_wait_cb()
 * needs to resubmit the deferred read, plus a flag the test can poll. */
struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;	/* set once the retry was submitted */
};
778 
/* io_wait callback: the bdev_io pool has room again, so resubmit the read
 * that previously failed with -ENOMEM and mark the entry as submitted. */
static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}
789 
/* Verify that submitting an I/O type the backend reports as unsupported is
 * rejected with -ENOTSUP (exercised via WRITE/WRITE_ZEROES). */
static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	/* Restore the shared support matrix for subsequent tests. */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
830 
/* Verify the io_wait queue: with a 4-entry bdev_io pool, a fifth submission
 * fails with -ENOMEM; queued waiters are then invoked in order as pool
 * entries are returned by completions. */
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Exhaust the 4-entry bdev_io pool. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	/* Each completion frees one pool entry, firing one waiter in order. */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
908 
/* Verify _spdk_bdev_io_should_split(): no boundary configured, non-LBA I/O
 * (RESET), an I/O ending exactly at the boundary, and one crossing it. */
static void
bdev_io_spans_boundary_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;

	memset(&bdev, 0, sizeof(bdev));

	bdev.optimal_io_boundary = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary set - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O run right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == true);
}
941 
942 static void
943 bdev_io_split(void)
944 {
945 	struct spdk_bdev *bdev;
946 	struct spdk_bdev_desc *desc = NULL;
947 	struct spdk_io_channel *io_ch;
948 	struct spdk_bdev_opts bdev_opts = {
949 		.bdev_io_pool_size = 512,
950 		.bdev_io_cache_size = 64,
951 	};
952 	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
953 	struct ut_expected_io *expected_io;
954 	uint64_t i;
955 	int rc;
956 
957 	rc = spdk_bdev_set_opts(&bdev_opts);
958 	CU_ASSERT(rc == 0);
959 	spdk_bdev_initialize(bdev_init_cb, NULL);
960 
961 	bdev = allocate_bdev("bdev0");
962 
963 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
964 	CU_ASSERT(rc == 0);
965 	SPDK_CU_ASSERT_FATAL(desc != NULL);
966 	io_ch = spdk_bdev_get_io_channel(desc);
967 	CU_ASSERT(io_ch != NULL);
968 
969 	bdev->optimal_io_boundary = 16;
970 	bdev->split_on_optimal_io_boundary = false;
971 
972 	g_io_done = false;
973 
974 	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
975 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
976 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
977 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
978 
979 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
980 	CU_ASSERT(rc == 0);
981 	CU_ASSERT(g_io_done == false);
982 
983 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
984 	stub_complete_io(1);
985 	CU_ASSERT(g_io_done == true);
986 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
987 
988 	bdev->split_on_optimal_io_boundary = true;
989 
990 	/* Now test that a single-vector command is split correctly.
991 	 * Offset 14, length 8, payload 0xF000
992 	 *  Child - Offset 14, length 2, payload 0xF000
993 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
994 	 *
995 	 * Set up the expected values before calling spdk_bdev_read_blocks
996 	 */
997 	g_io_done = false;
998 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
999 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
1000 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1001 
1002 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
1003 	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
1004 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1005 
1006 	/* spdk_bdev_read_blocks will submit the first child immediately. */
1007 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
1008 	CU_ASSERT(rc == 0);
1009 	CU_ASSERT(g_io_done == false);
1010 
1011 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1012 	stub_complete_io(2);
1013 	CU_ASSERT(g_io_done == true);
1014 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1015 
1016 	/* Now set up a more complex, multi-vector command that needs to be split,
1017 	 *  including splitting iovecs.
1018 	 */
1019 	iov[0].iov_base = (void *)0x10000;
1020 	iov[0].iov_len = 512;
1021 	iov[1].iov_base = (void *)0x20000;
1022 	iov[1].iov_len = 20 * 512;
1023 	iov[2].iov_base = (void *)0x30000;
1024 	iov[2].iov_len = 11 * 512;
1025 
1026 	g_io_done = false;
1027 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
1028 	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
1029 	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
1030 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1031 
1032 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
1033 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
1034 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1035 
1036 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
1037 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
1038 	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
1039 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1040 
1041 	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
1042 	CU_ASSERT(rc == 0);
1043 	CU_ASSERT(g_io_done == false);
1044 
1045 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
1046 	stub_complete_io(3);
1047 	CU_ASSERT(g_io_done == true);
1048 
1049 	/* Test multi vector command that needs to be split by strip and then needs to be
1050 	 * split further due to the capacity of child iovs.
1051 	 */
1052 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
1053 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1054 		iov[i].iov_len = 512;
1055 	}
1056 
1057 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
1058 	g_io_done = false;
1059 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
1060 					   BDEV_IO_NUM_CHILD_IOV);
1061 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1062 		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
1063 	}
1064 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1065 
1066 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
1067 					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
1068 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1069 		ut_expected_io_set_iov(expected_io, i,
1070 				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
1071 	}
1072 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1073 
1074 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1075 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1076 	CU_ASSERT(rc == 0);
1077 	CU_ASSERT(g_io_done == false);
1078 
1079 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1080 	stub_complete_io(1);
1081 	CU_ASSERT(g_io_done == false);
1082 
1083 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1084 	stub_complete_io(1);
1085 	CU_ASSERT(g_io_done == true);
1086 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1087 
1088 	/* Test multi vector command that needs to be split by strip and then needs to be
1089 	 * split further due to the capacity of child iovs. In this case, the length of
1090 	 * the rest of iovec array with an I/O boundary is the multiple of block size.
1091 	 */
1092 
1093 	/* Fill iovec array for exactly one boundary. The iovec cnt for this boundary
1094 	 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
1095 	 */
1096 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1097 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1098 		iov[i].iov_len = 512;
1099 	}
1100 	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1101 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1102 		iov[i].iov_len = 256;
1103 	}
1104 	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1105 	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;
1106 
1107 	/* Add an extra iovec to trigger split */
1108 	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1109 	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1110 
1111 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
1112 	g_io_done = false;
1113 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
1114 					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
1115 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1116 		ut_expected_io_set_iov(expected_io, i,
1117 				       (void *)((i + 1) * 0x10000), 512);
1118 	}
1119 	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1120 		ut_expected_io_set_iov(expected_io, i,
1121 				       (void *)((i + 1) * 0x10000), 256);
1122 	}
1123 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1124 
1125 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
1126 					   1, 1);
1127 	ut_expected_io_set_iov(expected_io, 0,
1128 			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
1129 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1130 
1131 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
1132 					   1, 1);
1133 	ut_expected_io_set_iov(expected_io, 0,
1134 			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
1135 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1136 
1137 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
1138 				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1139 	CU_ASSERT(rc == 0);
1140 	CU_ASSERT(g_io_done == false);
1141 
1142 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1143 	stub_complete_io(1);
1144 	CU_ASSERT(g_io_done == false);
1145 
1146 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1147 	stub_complete_io(2);
1148 	CU_ASSERT(g_io_done == true);
1149 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1150 
1151 	/* Test multi vector command that needs to be split by strip and then needs to be
1152 	 * split further due to the capacity of child iovs, the child request offset should
1153 	 * be rewind to last aligned offset and go success without error.
1154 	 */
1155 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1156 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1157 		iov[i].iov_len = 512;
1158 	}
1159 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
1160 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1161 
1162 	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1163 	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
1164 
1165 	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1166 	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1167 
1168 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
1169 	g_io_done = false;
1170 	g_io_status = 0;
1171 	/* The first expected io should be start from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */
1172 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
1173 					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
1174 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1175 		ut_expected_io_set_iov(expected_io, i,
1176 				       (void *)((i + 1) * 0x10000), 512);
1177 	}
1178 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1179 	/* The second expected io should be start from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */
1180 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
1181 					   1, 2);
1182 	ut_expected_io_set_iov(expected_io, 0,
1183 			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
1184 	ut_expected_io_set_iov(expected_io, 1,
1185 			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
1186 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1187 	/* The third expected io should be start from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */
1188 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
1189 					   1, 1);
1190 	ut_expected_io_set_iov(expected_io, 0,
1191 			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
1192 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1193 
1194 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1195 				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1196 	CU_ASSERT(rc == 0);
1197 	CU_ASSERT(g_io_done == false);
1198 
1199 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1200 	stub_complete_io(1);
1201 	CU_ASSERT(g_io_done == false);
1202 
1203 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1204 	stub_complete_io(2);
1205 	CU_ASSERT(g_io_done == true);
1206 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1207 
1208 	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
1209 	 * split, so test that.
1210 	 */
1211 	bdev->optimal_io_boundary = 15;
1212 	g_io_done = false;
1213 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1214 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1215 
1216 	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1217 	CU_ASSERT(rc == 0);
1218 	CU_ASSERT(g_io_done == false);
1219 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1220 	stub_complete_io(1);
1221 	CU_ASSERT(g_io_done == true);
1222 
1223 	/* Test an UNMAP.  This should also not be split. */
1224 	bdev->optimal_io_boundary = 16;
1225 	g_io_done = false;
1226 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1227 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1228 
1229 	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1230 	CU_ASSERT(rc == 0);
1231 	CU_ASSERT(g_io_done == false);
1232 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1233 	stub_complete_io(1);
1234 	CU_ASSERT(g_io_done == true);
1235 
1236 	/* Test a FLUSH.  This should also not be split. */
1237 	bdev->optimal_io_boundary = 16;
1238 	g_io_done = false;
1239 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1240 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1241 
1242 	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1243 	CU_ASSERT(rc == 0);
1244 	CU_ASSERT(g_io_done == false);
1245 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1246 	stub_complete_io(1);
1247 	CU_ASSERT(g_io_done == true);
1248 
1249 	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1250 
1251 	/* Children requests return an error status */
1252 	bdev->optimal_io_boundary = 16;
1253 	iov[0].iov_base = (void *)0x10000;
1254 	iov[0].iov_len = 512 * 64;
1255 	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1256 	g_io_done = false;
1257 	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1258 
1259 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
1260 	CU_ASSERT(rc == 0);
1261 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1262 	stub_complete_io(4);
1263 	CU_ASSERT(g_io_done == false);
1264 	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1265 	stub_complete_io(1);
1266 	CU_ASSERT(g_io_done == true);
1267 	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1268 
1269 	spdk_put_io_channel(io_ch);
1270 	spdk_bdev_close(desc);
1271 	free_bdev(bdev);
1272 	spdk_bdev_finish(bdev_fini_cb, NULL);
1273 	poll_threads();
1274 }
1275 
/*
 * Verify that splitting an I/O still makes forward progress when the
 * spdk_bdev_io pool runs dry: a child I/O that cannot allocate an
 * spdk_bdev_io must be queued on the management channel's io_wait_queue
 * and get submitted once a previously outstanding I/O completes and
 * returns its spdk_bdev_io to the pool.
 */
static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	/* Deliberately tiny pool (2 I/Os, cache of 1) so that one outstanding
	 * I/O plus a split parent is enough to exhaust it.
	 */
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 2,
		.bdev_io_cache_size = 1,
	};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Submit one I/O and leave it outstanding; it holds one of the two
	 * spdk_bdev_ios in the pool.
	 */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O.  This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 *  including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	/* Child 1: blocks 14-15 - all of iov[0] plus the first block of iov[1]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child 2: blocks 16-31 - 16 blocks entirely within iov[1]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child 3: blocks 32-45 - last 3 blocks of iov[1] plus all of iov[2]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1410 
/*
 * Verify bounce-buffer handling for bdev->required_alignment: payloads that
 * already satisfy the required alignment must be passed through untouched
 * (internal.orig_iovcnt stays 0), while misaligned payloads must be copied
 * into the internal bounce_iov and the original iovecs restored (orig_iovcnt
 * reset to 0) once the I/O completes.
 */
static void
bdev_io_alignment(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 20,
		.bdev_io_cache_size = 2,
	};
	int rc;
	void *buf;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	/* Use the stub that records the submitted iovecs so alignment can be
	 * checked after completion.
	 */
	fn_table.submit_request = stub_submit_request_aligned_buffer;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 8192);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Pass aligned single buffer with no alignment required */
	alignment = 1;
	/* required_alignment is stored as log2 of the byte alignment. */
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	/* Pass unaligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	/* No bounce buffer expected: the original pointer must be submitted. */
	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	/* Pass unaligned single buffer with 512 alignment required */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);

	/* Bounce buffer expected: iovs must point at internal.bounce_iov and
	 * satisfy the alignment; orig_iovcnt reverts to 0 on completion.
	 */
	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iov with 2048 alignment required */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	/* Both original iovecs must be preserved (orig_iovcnt == 2) while the
	 * bounce buffer is in use.
	 */
	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass iov without allocated buffer without alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	/* NULL base forces the bdev layer to allocate the data buffer itself;
	 * the allocated buffer must still satisfy the alignment.
	 */
	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass iov without allocated buffer with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}
1628 
/*
 * Verify that bounce-buffer alignment handling composes correctly with
 * splitting on optimal_io_boundary: each case asserts the number of child
 * I/Os produced by the split and completes them all.
 */
static void
bdev_io_alignment_with_boundary(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 20,
		.bdev_io_cache_size = 2,
	};
	int rc;
	void *buf;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	/* Use the stub that records the submitted iovecs so alignment can be
	 * checked after completion.
	 */
	fn_table.submit_request = stub_submit_request_aligned_buffer;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 131072);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	/* NULL base: the bdev layer allocates the data buffer itself. */
	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 3;

	/* Offset 1, length 3, boundary 2 -> children of 1 and 2 blocks. */
	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 16;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 128;
	bdev->split_on_optimal_io_boundary = true;

	/* buf + 16 is deliberately misaligned so a bounce buffer is needed. */
	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512 * 160;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 3 with 2 IO boundary */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512;
	iovs[1].iov_base = buf + 16 + 512 + 32;
	iovs[1].iov_len = 1024;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 64 with 32 IO boundary */
	bdev->optimal_io_boundary = 32;
	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384;
	iovs[1].iov_base = buf + 16 + 16384 + 32;
	iovs[1].iov_len = 16384;

	/* Offset 1, length 64, boundary 32 -> 3 children (31 + 32 + 1). */
	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	/* 512 * 160 with 32 IO boundary */
	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384 + 65536;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
	stub_complete_io(6);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}
1765 
1766 static void
1767 histogram_status_cb(void *cb_arg, int status)
1768 {
1769 	g_status = status;
1770 }
1771 
1772 static void
1773 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
1774 {
1775 	g_status = status;
1776 	g_histogram = histogram;
1777 }
1778 
1779 static void
1780 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
1781 		   uint64_t total, uint64_t so_far)
1782 {
1783 	g_count += count;
1784 }
1785 
/*
 * Exercise the per-bdev latency histogram: enable it, verify it starts out
 * empty, complete two I/Os, verify both are counted, then disable it and
 * verify that a subsequent histogram query fails with -EFAULT.
 */
static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);

	ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(ch != NULL);

	/* Enable histogram */
	g_status = -1;
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);

	/* Allocate histogram */
	histogram = spdk_histogram_data_alloc();
	SPDK_CU_ASSERT_FATAL(histogram != NULL);

	/* Check if histogram is zeroed */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	/* No I/O has completed yet, so every bucket should be empty. */
	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);

	CU_ASSERT(g_count == 0);

	rc = spdk_bdev_write_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Advance the mocked clock so the I/O has a non-zero latency to record. */
	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	rc = spdk_bdev_read_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	/* Check if histogram gathered data from all I/O channels */
	g_histogram = NULL;
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	/* Both the write and the read above should have been recorded. */
	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
	CU_ASSERT(g_count == 2);

	/* Disable histogram */
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == false);

	/* Try to run histogram commands on disabled bdev */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == -EFAULT);

	spdk_histogram_data_free(histogram);
	spdk_put_io_channel(ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1873 
1874 static void
1875 bdev_write_zeroes(void)
1876 {
1877 	struct spdk_bdev *bdev;
1878 	struct spdk_bdev_desc *desc = NULL;
1879 	struct spdk_io_channel *ioch;
1880 	struct ut_expected_io *expected_io;
1881 	uint64_t offset, num_io_blocks, num_blocks;
1882 	uint32_t num_completed, num_requests;
1883 	int rc;
1884 
1885 	spdk_bdev_initialize(bdev_init_cb, NULL);
1886 	bdev = allocate_bdev("bdev");
1887 
1888 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
1889 	CU_ASSERT_EQUAL(rc, 0);
1890 	SPDK_CU_ASSERT_FATAL(desc != NULL);
1891 	ioch = spdk_bdev_get_io_channel(desc);
1892 	SPDK_CU_ASSERT_FATAL(ioch != NULL);
1893 
1894 	fn_table.submit_request = stub_submit_request;
1895 	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1896 
1897 	/* First test that if the bdev supports write_zeroes, the request won't be split */
1898 	bdev->md_len = 0;
1899 	bdev->blocklen = 4096;
1900 	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
1901 
1902 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
1903 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1904 	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
1905 	CU_ASSERT_EQUAL(rc, 0);
1906 	num_completed = stub_complete_io(1);
1907 	CU_ASSERT_EQUAL(num_completed, 1);
1908 
1909 	/* Check that if write zeroes is not supported it'll be replaced by regular writes */
1910 	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
1911 	num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
1912 	num_requests = 2;
1913 	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;
1914 
1915 	for (offset = 0; offset < num_requests; ++offset) {
1916 		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
1917 						   offset * num_io_blocks, num_io_blocks, 0);
1918 		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1919 	}
1920 
1921 	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
1922 	CU_ASSERT_EQUAL(rc, 0);
1923 	num_completed = stub_complete_io(num_requests);
1924 	CU_ASSERT_EQUAL(num_completed, num_requests);
1925 
1926 	/* Check that the splitting is correct if bdev has interleaved metadata */
1927 	bdev->md_interleave = true;
1928 	bdev->md_len = 64;
1929 	bdev->blocklen = 4096 + 64;
1930 	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
1931 
1932 	num_requests = offset = 0;
1933 	while (offset < num_blocks) {
1934 		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
1935 		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
1936 						   offset, num_io_blocks, 0);
1937 		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1938 		offset += num_io_blocks;
1939 		num_requests++;
1940 	}
1941 
1942 	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
1943 	CU_ASSERT_EQUAL(rc, 0);
1944 	num_completed = stub_complete_io(num_requests);
1945 	CU_ASSERT_EQUAL(num_completed, num_requests);
1946 	num_completed = stub_complete_io(num_requests);
1947 	assert(num_completed == 0);
1948 
1949 	/* Check the the same for separate metadata buffer */
1950 	bdev->md_interleave = false;
1951 	bdev->md_len = 64;
1952 	bdev->blocklen = 4096;
1953 
1954 	num_requests = offset = 0;
1955 	while (offset < num_blocks) {
1956 		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
1957 		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
1958 						   offset, num_io_blocks, 0);
1959 		expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
1960 		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1961 		offset += num_io_blocks;
1962 		num_requests++;
1963 	}
1964 
1965 	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
1966 	CU_ASSERT_EQUAL(rc, 0);
1967 	num_completed = stub_complete_io(num_requests);
1968 	CU_ASSERT_EQUAL(num_completed, num_requests);
1969 
1970 	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
1971 	spdk_put_io_channel(ioch);
1972 	spdk_bdev_close(desc);
1973 	free_bdev(bdev);
1974 	spdk_bdev_finish(bdev_fini_cb, NULL);
1975 	poll_threads();
1976 }
1977 
1978 int
1979 main(int argc, char **argv)
1980 {
1981 	CU_pSuite		suite = NULL;
1982 	unsigned int		num_failures;
1983 
1984 	if (CU_initialize_registry() != CUE_SUCCESS) {
1985 		return CU_get_error();
1986 	}
1987 
1988 	suite = CU_add_suite("bdev", null_init, null_clean);
1989 	if (suite == NULL) {
1990 		CU_cleanup_registry();
1991 		return CU_get_error();
1992 	}
1993 
1994 	if (
1995 		CU_add_test(suite, "bytes_to_blocks_test", bytes_to_blocks_test) == NULL ||
1996 		CU_add_test(suite, "num_blocks_test", num_blocks_test) == NULL ||
1997 		CU_add_test(suite, "io_valid", io_valid_test) == NULL ||
1998 		CU_add_test(suite, "open_write", open_write_test) == NULL ||
1999 		CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
2000 		CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
2001 		CU_add_test(suite, "bdev_io_types", bdev_io_types_test) == NULL ||
2002 		CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
2003 		CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
2004 		CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
2005 		CU_add_test(suite, "bdev_io_split_with_io_wait", bdev_io_split_with_io_wait) == NULL ||
2006 		CU_add_test(suite, "bdev_io_alignment_with_boundary", bdev_io_alignment_with_boundary) == NULL ||
2007 		CU_add_test(suite, "bdev_io_alignment", bdev_io_alignment) == NULL ||
2008 		CU_add_test(suite, "bdev_histograms", bdev_histograms) == NULL ||
2009 		CU_add_test(suite, "bdev_write_zeroes", bdev_write_zeroes) == NULL
2010 	) {
2011 		CU_cleanup_registry();
2012 		return CU_get_error();
2013 	}
2014 
2015 	allocate_threads(1);
2016 	set_thread(0);
2017 
2018 	CU_basic_set_mode(CU_BRM_VERBOSE);
2019 	CU_basic_run_tests();
2020 	num_failures = CU_get_number_of_failures();
2021 	CU_cleanup_registry();
2022 
2023 	free_threads();
2024 
2025 	return num_failures;
2026 }
2027