/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"
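/*
 * Including bdev.c directly (rather than linking against it) gives these
 * tests access to the implementation's static helpers, e.g.
 * _spdk_bdev_io_should_split() exercised below.
 */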

DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
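
/*
 * Each DEFINE_STUB above is roughly equivalent to hand-writing a function
 * that returns the given fixed value -- a simplified sketch (the actual
 * macro in spdk_internal/mock.h additionally lets a test override the
 * return value at run time):
 *
 *	uint64_t
 *	spdk_notify_send(const char *type, const char *ctx)
 *	{
 *		return 0;
 *	}
 */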

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
struct spdk_histogram_data *g_histogram;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t				type;
	uint64_t			offset;
	uint64_t			length;
	int				iovcnt;
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	void				*md_buf;
	TAILQ_ENTRY(ut_expected_io)	link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}
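
/*
 * Typical usage of the expected-I/O helpers (an illustrative sketch; the
 * tests below follow exactly this shape):
 *
 *	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 8, 1);
 *	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
 *	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 *
 * stub_submit_request() then pops the head of expected_io and asserts that
 * the submitted bdev_io matches it field by field.
 */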

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	int i;

	g_bdev_io = bdev_io;

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		expected_iov = &expected_io->iov[i];
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_aligned_buffer_cb(struct spdk_io_channel *_ch,
				      struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_aligned_buffer(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_aligned_buffer_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
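
/*
 * spdk_bdev_io_get_buf() ensures the I/O carries a data buffer that
 * satisfies the bdev's required_alignment -- allocating one from the bdev
 * layer when the caller's buffer is missing or misaligned -- before
 * invoking the callback, which here simply forwards to the plain stub.
 */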

static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		/* Complete with the status the test requested (SUCCESS by default). */
		io_status = g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
};
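/* Any I/O type not listed above starts out false (C designated-initializer semantics). */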

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};
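
/*
 * async_init means the bdev layer does not consider this module initialized
 * until spdk_bdev_module_init_done() is called -- which bdev_ut_module_init()
 * does immediately after registering its io_device.
 */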

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	free_bdev(bdev);

	*(bool *)cb_arg = true;
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 and bdev6 are virtual bdevs with the same base bdev (bdev2).
	 * This models partitioning or logical volume use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of its base
	 * bdevs (bdev2) is shared with the virtual bdevs bdev5 and bdev6.
	 * This models caching, RAID, partitioning or logical volume use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |           \  |  /         \
	 *      bdev0   bdev1          bdev2         bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since no module has
	 * claimed it.
	 */
	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/*
	 * Now use a blocklen that is not a power of two, so the conversion
	 * cannot rely on power-of-two shortcuts: offset 100 / 100 = block 1,
	 * length 200 / 100 = 2 blocks.
	 */
	bdev.blocklen = 100;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

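/*
 * spdk_bdev_notify_blockcnt_change() lets a module resize a registered bdev.
 * Growing is always allowed; shrinking is refused while a descriptor holds
 * the bdev open, as asserted below.
 */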
static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	/* Now try the same with the bdev open */
	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Create and register bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the bdev's name.  The name is
	 * already taken, so it cannot be added to the alias list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/* Try adding a NULL alias; this should fail. */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* The alias is used for the first time, so this should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* The alias was already added to another bdev, so this should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* The alias is used for the first time, so this should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* The alias is not on the bdev's alias list, so this should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* The alias is present on the bdev's alias list, so this should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* The alias is present on the bdev's alias list, so this should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/*
	 * Try removing the name instead of an alias.  This should fail: a
	 * bdev's name cannot be changed or removed this way.
	 */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Delete all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Delete all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}

static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}
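
/*
 * The pattern exercised by bdev_io_wait_test() below: when a submit call
 * fails with -ENOMEM (the spdk_bdev_io pool is exhausted), the caller
 * registers a spdk_bdev_io_wait_entry with spdk_bdev_queue_io_wait().  The
 * bdev layer invokes entry.cb_fn (io_wait_cb above) once an spdk_bdev_io
 * frees up, so the I/O can be resubmitted.
 */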

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_spans_boundary_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;

	memset(&bdev, 0, sizeof(bdev));

	bdev.optimal_io_boundary = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary set - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_split(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 512,
		.bdev_io_cache_size = 64,
	};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 *  including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);

	/* Test a multi-vector command that needs to be split on the optimal I/O
	 * boundary and then split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split on the optimal I/O
	 * boundary and then split further due to the capacity of child iovs.  In
	 * this case, the length of the iovecs that fall within one I/O boundary
	 * is a multiple of the block size.
	 */

	/* Fill the iovec array for exactly one boundary.  The iovec count for
	 * this boundary is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity
	 * of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split on the optimal I/O
	 * boundary and then split further due to the capacity of child iovs.  The
	 * child request offset should be rewound to the last aligned offset so
	 * that the request completes successfully.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected I/O starts at offset 0 and ends at offset BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected I/O starts at offset BDEV_IO_NUM_CHILD_IOV - 1 and ends at offset BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected I/O starts at offset BDEV_IO_NUM_CHILD_IOV and ends at offset BDEV_IO_NUM_CHILD_IOV + 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
	 * split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Child requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 2,
		.bdev_io_cache_size = 1,
	};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the limited
	 * capacity of the spdk_bdev_io pool.
	 */

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O.  This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 *  including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the limited
	 * capacity of the spdk_bdev_io pool.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_alignment(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 20,
		.bdev_io_cache_size = 2,
	};
	int rc;
	void *buf;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	fn_table.submit_request = stub_submit_request_aligned_buffer;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 8192);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Pass aligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	/* Pass unaligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	/* Pass unaligned single buffer with 512 alignment required */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iov with 2048 alignment required */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass iov without allocated buffer without alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass iov without allocated buffer with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}
1630 
1631 static void
1632 bdev_io_alignment_with_boundary(void)
1633 {
1634 	struct spdk_bdev *bdev;
1635 	struct spdk_bdev_desc *desc = NULL;
1636 	struct spdk_io_channel *io_ch;
1637 	struct spdk_bdev_opts bdev_opts = {
1638 		.bdev_io_pool_size = 20,
1639 		.bdev_io_cache_size = 2,
1640 	};
1641 	int rc;
1642 	void *buf;
1643 	struct iovec iovs[2];
1644 	int iovcnt;
1645 	uint64_t alignment;
1646 
1647 	rc = spdk_bdev_set_opts(&bdev_opts);
1648 	CU_ASSERT(rc == 0);
1649 	spdk_bdev_initialize(bdev_init_cb, NULL);
1650 
1651 	fn_table.submit_request = stub_submit_request_aligned_buffer;
1652 	bdev = allocate_bdev("bdev0");
1653 
1654 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
1655 	CU_ASSERT(rc == 0);
1656 	CU_ASSERT(desc != NULL);
1657 	io_ch = spdk_bdev_get_io_channel(desc);
1658 	CU_ASSERT(io_ch != NULL);
1659 
1660 	/* Create aligned buffer */
1661 	rc = posix_memalign(&buf, 4096, 131072);
1662 	SPDK_CU_ASSERT_FATAL(rc == 0);
1663 	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1664 
1665 	/* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
1666 	alignment = 512;
1667 	bdev->required_alignment = spdk_u32log2(alignment);
1668 	bdev->optimal_io_boundary = 2;
1669 	bdev->split_on_optimal_io_boundary = true;
1670 
1671 	iovcnt = 1;
1672 	iovs[0].iov_base = NULL;
1673 	iovs[0].iov_len = 512 * 3;
1674 
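	/*
	 * Reading 3 blocks starting at block 1 crosses the 2-block boundary at
	 * block 2, so the request is split into two children: 1 block (block 1)
	 * and 2 blocks (blocks 2-3), matching the outstanding count below.
	 */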
	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 8KiB with 16 IO boundary, allocate a large data buffer from the bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 16;

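	/*
	 * 16 blocks starting at block 1 cross the 16-block boundary at block 16:
	 * the children are blocks 1-15 (15 blocks) and block 16 (1 block).
	 */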
	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 128;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512 * 160;
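	/*
	 * 160 blocks starting at block 1 split at the 128-block boundary into
	 * blocks 1-127 (127 blocks = 63.5KiB) and blocks 128-160 (33 blocks =
	 * 16.5KiB). Since the buffer sits at a 16-byte offset, each child also
	 * needs an aligned bounce buffer.
	 */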
	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 3 with 2 IO boundary */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512;
	iovs[1].iov_base = buf + 16 + 512 + 32;
	iovs[1].iov_len = 1024;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 64 with 32 IO boundary */
	bdev->optimal_io_boundary = 32;
	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384;
	iovs[1].iov_base = buf + 16 + 16384 + 32;
	iovs[1].iov_len = 16384;

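	/*
	 * 64 blocks starting at block 1 with a 32-block boundary yield three
	 * children: blocks 1-31 (31 blocks), 32-63 (32 blocks), and 64 (1 block).
	 */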
	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	/* 512 * 160 with 32 IO boundary */
	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384 + 65536;

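	/*
	 * 160 blocks starting at block 1 with a 32-block boundary split into
	 * 31 + 32 + 32 + 32 + 32 + 1 blocks, i.e. six children.
	 */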
	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
	stub_complete_io(6);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}

static void
histogram_status_cb(void *cb_arg, int status)
{
	g_status = status;
}

static void
histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
{
	g_status = status;
	g_histogram = histogram;
}

static void
histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
		   uint64_t total, uint64_t so_far)
{
	g_count += count;
}

static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);

	ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(ch != NULL);

	/* Enable histogram */
	g_status = -1;
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);

	/* Allocate histogram */
	histogram = spdk_histogram_data_alloc();
	SPDK_CU_ASSERT_FATAL(histogram != NULL);

	/* Check that the histogram is zeroed */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);

	CU_ASSERT(g_count == 0);

	rc = spdk_bdev_write_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	rc = spdk_bdev_read_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

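	/*
	 * One write and one read have completed, each with a simulated 10us
	 * latency, so the histogram should now contain exactly two samples.
	 */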
	/* Check that the histogram gathered data from all I/O channels */
	g_histogram = NULL;
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
	CU_ASSERT(g_count == 2);

	/* Disable histogram */
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == false);

	/* Try to run histogram commands on a disabled bdev */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == -EFAULT);

	spdk_histogram_data_free(histogram);
	spdk_put_io_channel(ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_write_zeroes(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_io_blocks, num_blocks;
	uint32_t num_completed, num_requests;
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* First test that if the bdev supports write_zeroes, the request won't be split */
	bdev->md_len = 0;
	bdev->blocklen = 4096;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

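	/*
	 * num_blocks is deliberately twice what a single zero buffer could back.
	 * With native WRITE_ZEROES support no data buffer is involved at all,
	 * so the whole range should still go down as one request.
	 */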
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);

	/* Check that if write zeroes is not supported, it'll be replaced by regular writes */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
	num_requests = 2;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;

	for (offset = 0; offset < num_requests; ++offset) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset * num_io_blocks, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

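	/*
	 * Without WRITE_ZEROES support, the bdev layer emulates the request with
	 * regular writes backed by its internal zero buffer, so each emulated
	 * write can cover at most ZERO_BUFFER_SIZE bytes -- two requests here.
	 */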
	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	/* Check that the splitting is correct if the bdev has interleaved metadata */
	bdev->md_interleave = true;
	bdev->md_len = 64;
	bdev->blocklen = 4096 + 64;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

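	/*
	 * With interleaved metadata, blocklen already includes the 64 bytes of
	 * metadata per block, so each emulated write is capped at
	 * ZERO_BUFFER_SIZE / blocklen blocks. The loop builds the expected
	 * request sequence from that cap.
	 */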
	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, 0);

	/* Check the same for a separate metadata buffer */
	bdev->md_interleave = false;
	bdev->md_len = 64;
	bdev->blocklen = 4096;

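	/*
	 * With a separate metadata buffer, the zero buffer has to supply both the
	 * data and the metadata, so the per-request cap drops to
	 * ZERO_BUFFER_SIZE / (blocklen + md_len) blocks, and the expected md_buf
	 * points just past the data portion of the zero buffer.
	 */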
	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len),
					 num_blocks - offset);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_open_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc[2] = {};
	int rc;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);

	spdk_bdev_unregister(bdev, NULL, NULL);

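	/*
	 * Unregistering starts the hotremove path: the existing descriptor keeps
	 * the bdev alive, but any new open attempt must now fail with -ENODEV.
	 */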
	rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -ENODEV);
	SPDK_CU_ASSERT_FATAL(desc[1] == NULL);

	spdk_bdev_close(desc[0]);
	free_bdev(bdev);
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	spdk_bdev_close(desc);
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	spdk_bdev_close(desc);
}

static void
bdev_open_ext(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

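	/*
	 * The extended open API requires an event callback so that hot-remove
	 * can be delivered to the descriptor owner; passing NULL is rejected.
	 */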
	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;

	/* Simulate hot-unplug by unregistering the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();

	/* Check that the correct events were triggered in the event callbacks */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);

	free_bdev(bdev);
	poll_threads();
}

int
main(int argc, char **argv)
{
	CU_pSuite		suite = NULL;
	unsigned int		num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", null_init, null_clean);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "bytes_to_blocks_test", bytes_to_blocks_test) == NULL ||
		CU_add_test(suite, "num_blocks_test", num_blocks_test) == NULL ||
		CU_add_test(suite, "io_valid", io_valid_test) == NULL ||
		CU_add_test(suite, "open_write", open_write_test) == NULL ||
		CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
		CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
		CU_add_test(suite, "bdev_io_types", bdev_io_types_test) == NULL ||
		CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
		CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
		CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
		CU_add_test(suite, "bdev_io_split_with_io_wait", bdev_io_split_with_io_wait) == NULL ||
		CU_add_test(suite, "bdev_io_alignment_with_boundary", bdev_io_alignment_with_boundary) == NULL ||
		CU_add_test(suite, "bdev_io_alignment", bdev_io_alignment) == NULL ||
		CU_add_test(suite, "bdev_histograms", bdev_histograms) == NULL ||
		CU_add_test(suite, "bdev_write_zeroes", bdev_write_zeroes) == NULL ||
		CU_add_test(suite, "bdev_open_while_hotremove", bdev_open_while_hotremove) == NULL ||
		CU_add_test(suite, "bdev_open_ext", bdev_open_ext) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}