/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/test_env.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

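/*
 * The implementation under test is compiled directly into this file (see the
 * #include of bdev/bdev.c below) so the tests can reach bdev-internal helpers
 * such as _spdk_bdev_io_should_split() that are not part of the public bdev
 * API.
 */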

#include "bdev/bdev.c"
DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name, const char *short_name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_is_ptr, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));

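/*
 * The tests run on a single SPDK thread (registered in main()) whose
 * message-passing function simply invokes the callback inline, so messages
 * sent by the bdev layer execute synchronously within the test.
 */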
static void
_bdev_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
{
	fn(ctx);
}

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

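/*
 * Each test queues ut_expected_io entries on the channel before issuing I/O.
 * stub_submit_request() pops the next entry and verifies that the submitted
 * spdk_bdev_io matches the expected type, offset, length and iovecs, which is
 * how the splitting tests below check exactly what reached the "backend".
 */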
struct ut_expected_io {
	uint8_t				type;
	uint64_t			offset;
	uint64_t			length;
	int				iovcnt;
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	TAILQ_ENTRY(ut_expected_io)	link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};

static bool g_io_done;
static enum spdk_bdev_io_status g_io_status;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	int i;

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		expected_iov = &expected_io->iov[i];
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

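/*
 * Complete up to num_to_complete of the I/Os currently queued on the stub
 * channel, always with SPDK_BDEV_IO_STATUS_SUCCESS, and report how many were
 * actually completed.
 */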
static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return true;
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(&vbdev_ut_if)

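/*
 * vbdev_ut never claims anything during examine; it just signals completion
 * immediately so that bdev registration in the tests does not block waiting
 * on module examination.
 */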
static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

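/*
 * allocate_bdev() registers a 1024-block, 512-byte-block test bdev backed by
 * the stub fn_table above; allocate_vbdev() layers a virtual bdev on top of
 * one or two of those base bdevs.  Both are torn down with free_bdev() /
 * free_vbdev().
 */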
static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_vbdev(char *name, struct spdk_bdev *base1, struct spdk_bdev *base2)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev *array[2];
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	/* vbdev must have at least one base bdev */
	CU_ASSERT(base1 != NULL);

	array[0] = base1;
	array[1] = base2;

	rc = spdk_vbdev_register(bdev, array, base2 == NULL ? 1 : 2);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	free_bdev(bdev);
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, NULL);
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs built on bdev2 (bdev7
	 * also has bdev3 as a base).  This models partitioning or logical
	 * volume use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of its base
	 * bdevs (bdev2) is shared with the other virtual bdevs bdev5 and
	 * bdev6.  This models caching, RAID, partitioning or logical volume
	 * use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |           \  |  /         \
	 *      bdev0   bdev1          bdev2         bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4", bdev[0], bdev[1]);
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5", bdev[2], NULL);
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6", bdev[2], NULL);

	bdev[7] = allocate_vbdev("bdev7", bdev[2], bdev[3]);

	bdev[8] = allocate_vbdev("bdev8", bdev[4], bdev[5]);

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since bdev8 has not been
	 * claimed by any module.
	 */
	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);
}

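/*
 * spdk_bdev_notify_blockcnt_change() may always grow a bdev, but it is only
 * allowed to shrink one while no descriptors are open; the test below checks
 * both the unopened and opened cases.
 */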
static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	/* Repeat the checks with the bdev opened */
	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Create and register bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	/*
	 * Try adding an alias identical to the bdev's name.
	 * Since the alias matches the name, it cannot be added to the alias list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Try adding a NULL alias.
	 * This should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* The alias is used for the first time, so this should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* The alias was already added to another bdev, so this should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* The alias is used for the first time, so this should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* The alias is not on the bdev's alias list, so this should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* The alias is present on the bdev's alias list, so this should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* The alias is present on the bdev's alias list, so this should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing the name instead of an alias.  This should fail since the name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try deleting all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try deleting all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}

static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

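/*
 * Limit the spdk_bdev_io pool to 4 entries so the 5th read fails with
 * -ENOMEM, then use spdk_bdev_queue_io_wait() to register callbacks that
 * resubmit the reads as earlier I/Os complete and return their spdk_bdev_io
 * to the pool.
 */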
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
}

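/*
 * _spdk_bdev_io_should_split() should return true only when the bdev has an
 * optimal_io_boundary, the I/O is LBA-based (not a RESET), and the I/O range
 * crosses a boundary multiple.
 */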
static void
bdev_io_spans_boundary_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;

	memset(&bdev, 0, sizeof(bdev));

	bdev.optimal_io_boundary = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary set - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == true);
}

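/*
 * Exercise the I/O splitting path with an optimal_io_boundary of 16 blocks:
 * a single-vector read split in two, a multi-vector write split with an
 * iovec divided between children, reads split again because a child can hold
 * at most BDEV_IO_NUM_CHILD_IOV iovecs, and finally WRITE_ZEROES, UNMAP and
 * FLUSH commands, which are never split.
 */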
static void
bdev_io_split(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 512,
		.bdev_io_cache_size = 64,
	};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit both children immediately. */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 *  including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

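	/* The 32-block write at offset 14 crosses boundaries at blocks 16 and 32,
	 * producing children of 2, 16 and 14 blocks.  iov[1] (20 blocks) is split
	 * across all three children, as the expected iovecs below show.
	 */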
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);

	/* Test a multi-vector command that needs to be split by the optimal I/O boundary
	 * and then split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split by the optimal I/O boundary
	 * and then split further due to the capacity of child iovs, but where the split
	 * fails.  The split fails because the length of one iovec is not a multiple of
	 * the block size.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
	 * split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
}

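/*
 * Same splitting scenarios as above, but with the spdk_bdev_io pool reduced
 * to two entries so the child I/Os cannot all be allocated up front; the
 * split logic must queue on the mgmt channel's io_wait queue and resume as
 * completions return spdk_bdev_io structures to the pool.
 */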
static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 2,
		.bdev_io_cache_size = 1,
	};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O.  This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 *  including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", null_init, null_clean);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "bytes_to_blocks_test", bytes_to_blocks_test) == NULL ||
		CU_add_test(suite, "num_blocks_test", num_blocks_test) == NULL ||
		CU_add_test(suite, "io_valid", io_valid_test) == NULL ||
		CU_add_test(suite, "open_write", open_write_test) == NULL ||
		CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
		CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
		CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
		CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
		CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
		CU_add_test(suite, "bdev_io_split_with_io_wait", bdev_io_split_with_io_wait) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	spdk_allocate_thread(_bdev_send_msg, NULL, NULL, NULL, "thread0");
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	spdk_free_thread();
	return num_failures;
}