xref: /spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c (revision ea002f5068f504b79a2f4a920655688d7bb15451)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/ut_multithread.c"
37 #include "unit/lib/json_mock.c"
38 
39 #include "spdk/config.h"
40 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
41 #undef SPDK_CONFIG_VTUNE
42 
43 #include "bdev/bdev.c"
44 
/* Stub out the spdk_conf accessors so the bdev layer behaves as if no
 * configuration file were present (no sections, no values).
 */
DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
50 
/* Tracing is not exercised by these tests; provide no-op definitions for
 * the trace symbols bdev.c references.
 */
struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name, const char *short_name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_is_ptr, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
61 
/* No-op stub: these tests never inspect translated SCSI sense codes, so the
 * output parameters are deliberately left untouched.
 */
void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}
67 
/* CUnit suite initialization hook - nothing to set up. */
static int
null_init(void)
{
	return 0;
}
73 
/* CUnit suite cleanup hook - nothing to tear down. */
static int
null_clean(void)
{
	return 0;
}
79 
/* Backend destruct callback for the stub bdevs - nothing to free. */
static int
stub_destruct(void *ctx)
{
	return 0;
}
85 
/* One I/O that a test expects stub_submit_request() to observe.
 * length == 0 means only the type is checked; iovcnt == 0 means the iovs
 * are not compared (e.g. UNMAP/FLUSH/WRITE_ZEROES carry none).
 */
struct ut_expected_io {
	uint8_t				type;
	uint64_t			offset;
	uint64_t			length;
	int				iovcnt;
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	TAILQ_ENTRY(ut_expected_io)	link;
};
94 
/* Per-channel context for the stub backend. */
struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;		/* submitted but not yet completed */
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;		/* expectations queued by tests */
};
100 
/* State shared between the stub backend and the test cases. */
static bool g_io_done;			/* set by io_done() when a submitted I/O completes */
static struct spdk_bdev_io *g_bdev_io;	/* last I/O seen by stub_submit_request() */
static enum spdk_bdev_io_status g_io_status;	/* status recorded by io_done() */
static uint32_t g_bdev_ut_io_device;	/* dummy address used as the io_device handle */
static struct bdev_ut_channel *g_bdev_ut_channel;	/* the single live channel, if any */
106 
107 static struct ut_expected_io *
108 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
109 {
110 	struct ut_expected_io *expected_io;
111 
112 	expected_io = calloc(1, sizeof(*expected_io));
113 	SPDK_CU_ASSERT_FATAL(expected_io != NULL);
114 
115 	expected_io->type = type;
116 	expected_io->offset = offset;
117 	expected_io->length = length;
118 	expected_io->iovcnt = iovcnt;
119 
120 	return expected_io;
121 }
122 
123 static void
124 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
125 {
126 	expected_io->iov[pos].iov_base = base;
127 	expected_io->iov[pos].iov_len = len;
128 }
129 
130 static void
131 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
132 {
133 	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
134 	struct ut_expected_io *expected_io;
135 	struct iovec *iov, *expected_iov;
136 	int i;
137 
138 	g_bdev_io = bdev_io;
139 
140 	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
141 	ch->outstanding_io_count++;
142 
143 	expected_io = TAILQ_FIRST(&ch->expected_io);
144 	if (expected_io == NULL) {
145 		return;
146 	}
147 	TAILQ_REMOVE(&ch->expected_io, expected_io, link);
148 
149 	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
150 		CU_ASSERT(bdev_io->type == expected_io->type);
151 	}
152 
153 	if (expected_io->length == 0) {
154 		free(expected_io);
155 		return;
156 	}
157 
158 	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
159 	CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks);
160 
161 	if (expected_io->iovcnt == 0) {
162 		free(expected_io);
163 		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
164 		return;
165 	}
166 
167 	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
168 	for (i = 0; i < expected_io->iovcnt; i++) {
169 		iov = &bdev_io->u.bdev.iovs[i];
170 		expected_iov = &expected_io->iov[i];
171 		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
172 		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
173 	}
174 
175 	free(expected_io);
176 }
177 
/* submit_request variant that first asks the bdev layer for an aligned data
 * buffer; stub_submit_request() then runs as the get_buf callback.
 */
static void
stub_submit_request_aligned_buffer(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
184 
185 static uint32_t
186 stub_complete_io(uint32_t num_to_complete)
187 {
188 	struct bdev_ut_channel *ch = g_bdev_ut_channel;
189 	struct spdk_bdev_io *bdev_io;
190 	uint32_t num_completed = 0;
191 
192 	while (num_completed < num_to_complete) {
193 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
194 			break;
195 		}
196 		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
197 		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
198 		ch->outstanding_io_count--;
199 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
200 		num_completed++;
201 	}
202 
203 	return num_completed;
204 }
205 
/* Backend get_io_channel callback: hand out the channel registered for the
 * unit-test io_device.
 */
static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}
211 
/* The stub bdev claims to support every I/O type. */
static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return true;
}
217 
/* Backend function table shared by every stub bdev created in this file. */
static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};
224 
225 static int
226 bdev_ut_create_ch(void *io_device, void *ctx_buf)
227 {
228 	struct bdev_ut_channel *ch = ctx_buf;
229 
230 	CU_ASSERT(g_bdev_ut_channel == NULL);
231 	g_bdev_ut_channel = ch;
232 
233 	TAILQ_INIT(&ch->outstanding_io);
234 	ch->outstanding_io_count = 0;
235 	TAILQ_INIT(&ch->expected_io);
236 	return 0;
237 }
238 
/* I/O channel destructor: clear the global published by bdev_ut_create_ch(). */
static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}
245 
/* Module init hook: register the dummy io_device backing all stub bdevs. */
static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	return 0;
}
253 
/* Module fini hook: tear down the io_device registered in module_init. */
static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}
259 
/* Test module that owns the plain (non-virtual) stub bdevs. */
struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
};
265 
/* Forward declaration: the examine callback references vbdev_ut_if below. */
static void vbdev_ut_examine(struct spdk_bdev *bdev);

/* Module init hook for the virtual-bdev test module - nothing to do. */
static int
vbdev_ut_module_init(void)
{
	return 0;
}
273 
/* Module fini hook for the virtual-bdev test module - nothing to do. */
static void
vbdev_ut_module_fini(void)
{
}
278 
/* Test module that owns the virtual stub bdevs; its examine_config hook
 * (vbdev_ut_examine) is invoked by the bdev layer for newly registered bdevs.
 */
struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(&vbdev_ut_if)
288 
/* examine_config callback: immediately report that examination is finished
 * so bdev registration is never blocked on this module.
 */
static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}
294 
295 static struct spdk_bdev *
296 allocate_bdev(char *name)
297 {
298 	struct spdk_bdev *bdev;
299 	int rc;
300 
301 	bdev = calloc(1, sizeof(*bdev));
302 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
303 
304 	bdev->name = name;
305 	bdev->fn_table = &fn_table;
306 	bdev->module = &bdev_ut_if;
307 	bdev->blockcnt = 1024;
308 	bdev->blocklen = 512;
309 
310 	rc = spdk_bdev_register(bdev);
311 	CU_ASSERT(rc == 0);
312 
313 	return bdev;
314 }
315 
316 static struct spdk_bdev *
317 allocate_vbdev(char *name, struct spdk_bdev *base1, struct spdk_bdev *base2)
318 {
319 	struct spdk_bdev *bdev;
320 	struct spdk_bdev *array[2];
321 	int rc;
322 
323 	bdev = calloc(1, sizeof(*bdev));
324 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
325 
326 	bdev->name = name;
327 	bdev->fn_table = &fn_table;
328 	bdev->module = &vbdev_ut_if;
329 
330 	/* vbdev must have at least one base bdev */
331 	CU_ASSERT(base1 != NULL);
332 
333 	array[0] = base1;
334 	array[1] = base2;
335 
336 	rc = spdk_vbdev_register(bdev, array, base2 == NULL ? 1 : 2);
337 	CU_ASSERT(rc == 0);
338 
339 	return bdev;
340 }
341 
342 static void
343 free_bdev(struct spdk_bdev *bdev)
344 {
345 	spdk_bdev_unregister(bdev, NULL, NULL);
346 	poll_threads();
347 	memset(bdev, 0xFF, sizeof(*bdev));
348 	free(bdev);
349 }
350 
351 static void
352 free_vbdev(struct spdk_bdev *bdev)
353 {
354 	spdk_bdev_unregister(bdev, NULL, NULL);
355 	poll_threads();
356 	memset(bdev, 0xFF, sizeof(*bdev));
357 	free(bdev);
358 }
359 
/* Completion callback for spdk_bdev_get_device_stat(): verify the call
 * succeeded for "bdev0", release the stat buffer and the bdev itself, and
 * signal the waiting test through the bool pointed to by cb_arg.
 */
static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	free_bdev(bdev);

	*(bool *)cb_arg = true;
}
375 
376 static void
377 get_device_stat_test(void)
378 {
379 	struct spdk_bdev *bdev;
380 	struct spdk_bdev_io_stat *stat;
381 	bool done;
382 
383 	bdev = allocate_bdev("bdev0");
384 	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
385 	if (stat == NULL) {
386 		free_bdev(bdev);
387 		return;
388 	}
389 
390 	done = false;
391 	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
392 	while (!done) { poll_threads(); }
393 
394 
395 }
396 
/* Verify open-for-write rules: a bdev that has been claimed by a module may
 * only be opened read-only, while an unclaimed (leaf) bdev may be opened
 * read/write.
 */
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7). This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |           \  |  /         \
	 *      bdev0   bdev1          bdev2         bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4", bdev[0], bdev[1]);
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5", bdev[2], NULL);
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6", bdev[2], NULL);

	bdev[7] = allocate_vbdev("bdev7", bdev[2], bdev[3]);

	bdev[8] = allocate_vbdev("bdev8", bdev[4], bdev[5]);

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}
526 
/* Verify byte-to-block conversion for both power-of-two and non-power-of-two
 * block sizes, including rejection of unaligned offsets and lengths.
 */
static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen not the power of two */
	bdev.blocklen = 100;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}
562 
563 static void
564 num_blocks_test(void)
565 {
566 	struct spdk_bdev bdev;
567 	struct spdk_bdev_desc *desc = NULL;
568 	int rc;
569 
570 	memset(&bdev, 0, sizeof(bdev));
571 	bdev.name = "num_blocks";
572 	bdev.fn_table = &fn_table;
573 	bdev.module = &bdev_ut_if;
574 	spdk_bdev_register(&bdev);
575 	spdk_bdev_notify_blockcnt_change(&bdev, 50);
576 
577 	/* Growing block number */
578 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
579 	/* Shrinking block number */
580 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);
581 
582 	/* In case bdev opened */
583 	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
584 	CU_ASSERT(rc == 0);
585 	SPDK_CU_ASSERT_FATAL(desc != NULL);
586 
587 	/* Growing block number */
588 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
589 	/* Shrinking block number */
590 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);
591 
592 	spdk_bdev_close(desc);
593 	spdk_bdev_unregister(&bdev, NULL, NULL);
594 
595 	poll_threads();
596 }
597 
/* Verify spdk_bdev_io_valid_blocks() range checks on a 100-block bdev,
 * including offset+length overflow near the top of the uint64_t range.
 */
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}
623 
/* Verify alias management: duplicate/empty alias rejection, cross-bdev alias
 * uniqueness, deletion of single aliases, and spdk_bdev_alias_del_all() on
 * both empty and populated alias lists.
 */
static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Trying adding an alias identical to name.
	 * Alias is identical to name, so it can not be added to aliases list
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Trying to add empty alias,
	 * this one should fail
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Trying adding same alias to two different registered bdevs */

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying removing an alias from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Trying to del all alias from empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Trying to del all alias from non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
711 
/* Shared I/O completion callback: record completion and status in globals
 * for the test to inspect, then release the I/O.
 */
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}
719 
/* spdk_bdev_initialize() completion callback: library init must succeed. */
static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}
725 
/* spdk_bdev_finish() completion callback - nothing to check. */
static void
bdev_fini_cb(void *arg)
{
}
730 
/* Wrapper around spdk_bdev_io_wait_entry carrying the context io_wait_cb()
 * needs to resubmit, plus a flag the test polls.
 */
struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;	/* set once the retried read was accepted */
};
737 
/* io_wait callback: retry the read that previously failed with -ENOMEM and
 * mark the entry as submitted.
 */
static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}
748 
/* Exhaust a 4-entry bdev_io pool, verify a fifth submission fails with
 * -ENOMEM, and verify spdk_bdev_queue_io_wait() callbacks fire in FIFO
 * order as completions return entries to the pool.
 */
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Consume the entire 4-entry bdev_io pool. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	/* Pool is empty - the next submission must fail. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	/* Each completion frees one pool entry and fires the next waiter. */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
826 
827 static void
828 bdev_io_spans_boundary_test(void)
829 {
830 	struct spdk_bdev bdev;
831 	struct spdk_bdev_io bdev_io;
832 
833 	memset(&bdev, 0, sizeof(bdev));
834 
835 	bdev.optimal_io_boundary = 0;
836 	bdev_io.bdev = &bdev;
837 
838 	/* bdev has no optimal_io_boundary set - so this should return false. */
839 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
840 
841 	bdev.optimal_io_boundary = 32;
842 	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;
843 
844 	/* RESETs are not based on LBAs - so this should return false. */
845 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
846 
847 	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
848 	bdev_io.u.bdev.offset_blocks = 0;
849 	bdev_io.u.bdev.num_blocks = 32;
850 
851 	/* This I/O run right up to, but does not cross, the boundary - so this should return false. */
852 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
853 
854 	bdev_io.u.bdev.num_blocks = 33;
855 
856 	/* This I/O spans a boundary. */
857 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == true);
858 }
859 
860 static void
861 bdev_io_split(void)
862 {
863 	struct spdk_bdev *bdev;
864 	struct spdk_bdev_desc *desc = NULL;
865 	struct spdk_io_channel *io_ch;
866 	struct spdk_bdev_opts bdev_opts = {
867 		.bdev_io_pool_size = 512,
868 		.bdev_io_cache_size = 64,
869 	};
870 	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
871 	struct ut_expected_io *expected_io;
872 	uint64_t i;
873 	int rc;
874 
875 	rc = spdk_bdev_set_opts(&bdev_opts);
876 	CU_ASSERT(rc == 0);
877 	spdk_bdev_initialize(bdev_init_cb, NULL);
878 
879 	bdev = allocate_bdev("bdev0");
880 
881 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
882 	CU_ASSERT(rc == 0);
883 	SPDK_CU_ASSERT_FATAL(desc != NULL);
884 	io_ch = spdk_bdev_get_io_channel(desc);
885 	CU_ASSERT(io_ch != NULL);
886 
887 	bdev->optimal_io_boundary = 16;
888 	bdev->split_on_optimal_io_boundary = false;
889 
890 	g_io_done = false;
891 
892 	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
893 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
894 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
895 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
896 
897 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
898 	CU_ASSERT(rc == 0);
899 	CU_ASSERT(g_io_done == false);
900 
901 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
902 	stub_complete_io(1);
903 	CU_ASSERT(g_io_done == true);
904 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
905 
906 	bdev->split_on_optimal_io_boundary = true;
907 
908 	/* Now test that a single-vector command is split correctly.
909 	 * Offset 14, length 8, payload 0xF000
910 	 *  Child - Offset 14, length 2, payload 0xF000
911 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
912 	 *
913 	 * Set up the expected values before calling spdk_bdev_read_blocks
914 	 */
915 	g_io_done = false;
916 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
917 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
918 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
919 
920 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
921 	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
922 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
923 
924 	/* spdk_bdev_read_blocks will submit the first child immediately. */
925 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
926 	CU_ASSERT(rc == 0);
927 	CU_ASSERT(g_io_done == false);
928 
929 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
930 	stub_complete_io(2);
931 	CU_ASSERT(g_io_done == true);
932 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
933 
934 	/* Now set up a more complex, multi-vector command that needs to be split,
935 	 *  including splitting iovecs.
936 	 */
937 	iov[0].iov_base = (void *)0x10000;
938 	iov[0].iov_len = 512;
939 	iov[1].iov_base = (void *)0x20000;
940 	iov[1].iov_len = 20 * 512;
941 	iov[2].iov_base = (void *)0x30000;
942 	iov[2].iov_len = 11 * 512;
943 
944 	g_io_done = false;
945 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
946 	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
947 	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
948 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
949 
950 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
951 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
952 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
953 
954 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
955 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
956 	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
957 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
958 
959 	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
960 	CU_ASSERT(rc == 0);
961 	CU_ASSERT(g_io_done == false);
962 
963 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
964 	stub_complete_io(3);
965 	CU_ASSERT(g_io_done == true);
966 
967 	/* Test multi vector command that needs to be split by strip and then needs to be
968 	 * split further due to the capacity of child iovs.
969 	 */
970 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
971 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
972 		iov[i].iov_len = 512;
973 	}
974 
975 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
976 	g_io_done = false;
977 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
978 					   BDEV_IO_NUM_CHILD_IOV);
979 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
980 		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
981 	}
982 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
983 
984 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
985 					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
986 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
987 		ut_expected_io_set_iov(expected_io, i,
988 				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
989 	}
990 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
991 
992 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
993 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
994 	CU_ASSERT(rc == 0);
995 	CU_ASSERT(g_io_done == false);
996 
997 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
998 	stub_complete_io(1);
999 	CU_ASSERT(g_io_done == false);
1000 
1001 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1002 	stub_complete_io(1);
1003 	CU_ASSERT(g_io_done == true);
1004 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1005 
1006 	/* Test multi vector command that needs to be split by strip and then needs to be
1007 	 * split further due to the capacity of child iovs, but fails to split. The cause
1008 	 * of failure of split is that the length of an iovec is not multiple of block size.
1009 	 */
1010 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1011 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1012 		iov[i].iov_len = 512;
1013 	}
1014 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
1015 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1016 
1017 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
1018 	g_io_done = false;
1019 	g_io_status = 0;
1020 
1021 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1022 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1023 	CU_ASSERT(rc == 0);
1024 	CU_ASSERT(g_io_done == true);
1025 	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1026 
1027 	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
1028 	 * split, so test that.
1029 	 */
1030 	bdev->optimal_io_boundary = 15;
1031 	g_io_done = false;
1032 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1033 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1034 
1035 	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1036 	CU_ASSERT(rc == 0);
1037 	CU_ASSERT(g_io_done == false);
1038 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1039 	stub_complete_io(1);
1040 	CU_ASSERT(g_io_done == true);
1041 
1042 	/* Test an UNMAP.  This should also not be split. */
1043 	bdev->optimal_io_boundary = 16;
1044 	g_io_done = false;
1045 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1046 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1047 
1048 	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1049 	CU_ASSERT(rc == 0);
1050 	CU_ASSERT(g_io_done == false);
1051 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1052 	stub_complete_io(1);
1053 	CU_ASSERT(g_io_done == true);
1054 
1055 	/* Test a FLUSH.  This should also not be split. */
1056 	bdev->optimal_io_boundary = 16;
1057 	g_io_done = false;
1058 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1059 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1060 
1061 	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1062 	CU_ASSERT(rc == 0);
1063 	CU_ASSERT(g_io_done == false);
1064 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1065 	stub_complete_io(1);
1066 	CU_ASSERT(g_io_done == true);
1067 
1068 	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1069 
1070 	spdk_put_io_channel(io_ch);
1071 	spdk_bdev_close(desc);
1072 	free_bdev(bdev);
1073 	spdk_bdev_finish(bdev_fini_cb, NULL);
1074 	poll_threads();
1075 }
1076 
/*
 * Verify I/O splitting when spdk_bdev_io structures are scarce.  The pool is
 * sized to 2 (cache 1) and one bdev_io is held outstanding, so split children
 * cannot all be allocated up front: they must be queued on the management
 * channel's io_wait_queue and submitted one at a time as bdev_ios are
 * returned to the pool by completions.
 */
static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	/* Tiny pool: 2 bdev_ios total, 1 kept in the per-thread cache.  This is
	 * what forces the split children below to wait for free bdev_ios.
	 */
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 2,
		.bdev_io_cache_size = 1,
	};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Consume one bdev_io from the pool so that only one remains for the
	 * split I/O issued next.
	 */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O.  This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 *  including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	/* Child 1: blocks 14-15 — tail of iov[0] plus the first block of iov[1]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child 2: blocks 16-31 — one full optimal_io_boundary stripe from iov[1]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child 3: blocks 32-45 — remainder of iov[1] plus all of iov[2]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1211 
/*
 * Verify buffer alignment handling: when a payload does not satisfy the
 * bdev's required_alignment, the bdev layer must substitute an aligned
 * bounce buffer (internal.bounce_iov, with the caller's iovs saved in
 * internal.orig_iovcnt) and restore the original iovs on completion.
 * Aligned payloads, or bdevs with no alignment requirement, must be passed
 * through untouched.
 */
static void
bdev_io_alignment(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 20,
		.bdev_io_cache_size = 2,
	};
	int rc;
	void *buf;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	/* This stub checks the alignment of the buffers it receives, so the
	 * assertions below can inspect g_bdev_io after each submission.
	 */
	fn_table.submit_request = stub_submit_request_aligned_buffer;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 8192);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Pass aligned single buffer with no alignment required */
	alignment = 1;
	/* required_alignment is stored as log2 of the byte alignment */
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	/* Pass unaligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	/* orig_iovcnt == 0 means no bounce buffer was used; the caller's
	 * (unaligned) pointer must reach the driver unchanged.
	 */
	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	/* Pass unaligned single buffer with 512 alignment required */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);

	/* Unaligned payload + required alignment => a bounce buffer must be
	 * substituted while the I/O is in flight, and released on completion
	 * (orig_iovcnt back to 0).
	 */
	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iov with 2048 alignment required */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	/* Both caller iovs are collapsed into the single bounce iov;
	 * orig_iovcnt records how many were saved for restoration.
	 */
	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass iov without allocated buffer without alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	/* NULL iov_base => bdev layer allocates the data buffer itself, which
	 * must already satisfy the alignment requirement (no bounce needed).
	 */
	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass iov without allocated buffer with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}
1429 
1430 int
1431 main(int argc, char **argv)
1432 {
1433 	CU_pSuite		suite = NULL;
1434 	unsigned int		num_failures;
1435 
1436 	if (CU_initialize_registry() != CUE_SUCCESS) {
1437 		return CU_get_error();
1438 	}
1439 
1440 	suite = CU_add_suite("bdev", null_init, null_clean);
1441 	if (suite == NULL) {
1442 		CU_cleanup_registry();
1443 		return CU_get_error();
1444 	}
1445 
1446 	if (
1447 		CU_add_test(suite, "bytes_to_blocks_test", bytes_to_blocks_test) == NULL ||
1448 		CU_add_test(suite, "num_blocks_test", num_blocks_test) == NULL ||
1449 		CU_add_test(suite, "io_valid", io_valid_test) == NULL ||
1450 		CU_add_test(suite, "open_write", open_write_test) == NULL ||
1451 		CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
1452 		CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
1453 		CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
1454 		CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
1455 		CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
1456 		CU_add_test(suite, "bdev_io_split_with_io_wait", bdev_io_split_with_io_wait) == NULL ||
1457 		CU_add_test(suite, "bdev_io_alignment", bdev_io_alignment) == NULL
1458 	) {
1459 		CU_cleanup_registry();
1460 		return CU_get_error();
1461 	}
1462 
1463 	allocate_threads(1);
1464 	set_thread(0);
1465 
1466 	CU_basic_set_mode(CU_BRM_VERBOSE);
1467 	CU_basic_run_tests();
1468 	num_failures = CU_get_number_of_failures();
1469 	CU_cleanup_registry();
1470 
1471 	free_threads();
1472 
1473 	return num_failures;
1474 }
1475