xref: /spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c (revision bb488d2829a9b7863daab45917dd2174905cc0ae)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/ut_multithread.c"
37 #include "unit/lib/json_mock.c"
38 
39 #include "spdk/config.h"
40 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
41 #undef SPDK_CONFIG_VTUNE
42 
43 #include "bdev/bdev.c"
44 
/* Legacy config-file accessors: tests never supply a config, so return "not found". */
DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

/* Tracing is not exercised here; provide the symbols as no-ops. */
struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name, const char *short_name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_is_ptr, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
/* Notification subsystem is likewise stubbed out. */
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);


/* Shared state for test callbacks (currently unused in this chunk's tests). */
int g_status;
int g_count;
struct spdk_histogram_data *g_histogram;
68 
/* No-op stand-in for the SCSI translation helper; its result is never inspected here. */
void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}
74 
/* CUnit suite init hook: nothing to set up. */
static int
null_init(void)
{
	return 0;
}
80 
/* CUnit suite cleanup hook: nothing to tear down. */
static int
null_clean(void)
{
	return 0;
}
86 
/* bdev destruct callback: test bdevs own no resources, so always succeed. */
static int
stub_destruct(void *ctx)
{
	return 0;
}
92 
/* Describes one I/O the test expects the stub backend to receive. */
struct ut_expected_io {
	uint8_t				type;	/* SPDK_BDEV_IO_TYPE_*; INVALID skips the type check */
	uint64_t			offset;	/* expected offset_blocks */
	uint64_t			length;	/* expected num_blocks; 0 skips offset/length/iov checks */
	int				iovcnt;	/* expected iov count; 0 skips the iov checks */
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	TAILQ_ENTRY(ut_expected_io)	link;
};
101 
/* Per-channel context for the stub backend: I/Os in flight plus the expectation queue. */
struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};
107 
static bool g_io_done;			/* set by io_done() when a test I/O completes */
static struct spdk_bdev_io *g_bdev_io;	/* last I/O submitted to the stub backend */
static enum spdk_bdev_io_status g_io_status;	/* status captured by io_done() */
static uint32_t g_bdev_ut_io_device;	/* address used as the io_device handle */
static struct bdev_ut_channel *g_bdev_ut_channel;	/* the single live test channel */
113 
114 static struct ut_expected_io *
115 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
116 {
117 	struct ut_expected_io *expected_io;
118 
119 	expected_io = calloc(1, sizeof(*expected_io));
120 	SPDK_CU_ASSERT_FATAL(expected_io != NULL);
121 
122 	expected_io->type = type;
123 	expected_io->offset = offset;
124 	expected_io->length = length;
125 	expected_io->iovcnt = iovcnt;
126 
127 	return expected_io;
128 }
129 
130 static void
131 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
132 {
133 	expected_io->iov[pos].iov_base = base;
134 	expected_io->iov[pos].iov_len = len;
135 }
136 
137 static void
138 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
139 {
140 	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
141 	struct ut_expected_io *expected_io;
142 	struct iovec *iov, *expected_iov;
143 	int i;
144 
145 	g_bdev_io = bdev_io;
146 
147 	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
148 	ch->outstanding_io_count++;
149 
150 	expected_io = TAILQ_FIRST(&ch->expected_io);
151 	if (expected_io == NULL) {
152 		return;
153 	}
154 	TAILQ_REMOVE(&ch->expected_io, expected_io, link);
155 
156 	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
157 		CU_ASSERT(bdev_io->type == expected_io->type);
158 	}
159 
160 	if (expected_io->length == 0) {
161 		free(expected_io);
162 		return;
163 	}
164 
165 	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
166 	CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks);
167 
168 	if (expected_io->iovcnt == 0) {
169 		free(expected_io);
170 		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
171 		return;
172 	}
173 
174 	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
175 	for (i = 0; i < expected_io->iovcnt; i++) {
176 		iov = &bdev_io->u.bdev.iovs[i];
177 		expected_iov = &expected_io->iov[i];
178 		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
179 		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
180 	}
181 
182 	free(expected_io);
183 }
184 
185 static void
186 stub_submit_request_aligned_buffer_cb(struct spdk_io_channel *_ch,
187 				      struct spdk_bdev_io *bdev_io, bool success)
188 {
189 	CU_ASSERT(success == true);
190 
191 	stub_submit_request(_ch, bdev_io);
192 }
193 
/* Alternate submit path that first requests an aligned data buffer from the bdev layer. */
static void
stub_submit_request_aligned_buffer(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_aligned_buffer_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
200 
201 static uint32_t
202 stub_complete_io(uint32_t num_to_complete)
203 {
204 	struct bdev_ut_channel *ch = g_bdev_ut_channel;
205 	struct spdk_bdev_io *bdev_io;
206 	uint32_t num_completed = 0;
207 
208 	while (num_completed < num_to_complete) {
209 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
210 			break;
211 		}
212 		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
213 		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
214 		ch->outstanding_io_count--;
215 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
216 		num_completed++;
217 	}
218 
219 	return num_completed;
220 }
221 
/* get_io_channel callback: hand out the channel registered for the test io_device. */
static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}
227 
/* The stub backend claims support for every I/O type. */
static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return true;
}
233 
/* Function table shared by every bdev these tests create. */
static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};
240 
241 static int
242 bdev_ut_create_ch(void *io_device, void *ctx_buf)
243 {
244 	struct bdev_ut_channel *ch = ctx_buf;
245 
246 	CU_ASSERT(g_bdev_ut_channel == NULL);
247 	g_bdev_ut_channel = ch;
248 
249 	TAILQ_INIT(&ch->outstanding_io);
250 	ch->outstanding_io_count = 0;
251 	TAILQ_INIT(&ch->expected_io);
252 	return 0;
253 }
254 
/* io_device channel destructor: clear the global so a new channel can be created. */
static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}
261 
/* Module init: register the io_device whose channels carry bdev_ut_channel contexts. */
static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	return 0;
}
269 
/* Module fini: tear down the io_device registered in bdev_ut_module_init(). */
static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}
275 
/* Base-bdev test module (used as the claiming/owning module for physical test bdevs). */
struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
};
281 
282 static void vbdev_ut_examine(struct spdk_bdev *bdev);
283 
/* Virtual-bdev module init: nothing to do. */
static int
vbdev_ut_module_init(void)
{
	return 0;
}
289 
/* Virtual-bdev module fini: nothing to do. */
static void
vbdev_ut_module_fini(void)
{
}
294 
/* Virtual-bdev test module; its examine_config hook must ack every new bdev. */
struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};
301 
/* Register both test modules with the bdev layer at load time. */
SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
304 
/* examine_config hook: immediately tell the bdev layer we are done examining. */
static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}
310 
311 static struct spdk_bdev *
312 allocate_bdev(char *name)
313 {
314 	struct spdk_bdev *bdev;
315 	int rc;
316 
317 	bdev = calloc(1, sizeof(*bdev));
318 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
319 
320 	bdev->name = name;
321 	bdev->fn_table = &fn_table;
322 	bdev->module = &bdev_ut_if;
323 	bdev->blockcnt = 1024;
324 	bdev->blocklen = 512;
325 
326 	rc = spdk_bdev_register(bdev);
327 	CU_ASSERT(rc == 0);
328 
329 	return bdev;
330 }
331 
332 static struct spdk_bdev *
333 allocate_vbdev(char *name)
334 {
335 	struct spdk_bdev *bdev;
336 	int rc;
337 
338 	bdev = calloc(1, sizeof(*bdev));
339 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
340 
341 	bdev->name = name;
342 	bdev->fn_table = &fn_table;
343 	bdev->module = &vbdev_ut_if;
344 
345 	rc = spdk_bdev_register(bdev);
346 	CU_ASSERT(rc == 0);
347 
348 	return bdev;
349 }
350 
/* Unregister a test bdev, let the unregister complete, then poison and free it. */
static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	/* Fill with 0xFF so any use-after-free is loud. */
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}
359 
/*
 * Free a virtual test bdev.  The teardown sequence is identical to
 * free_bdev(), so delegate rather than duplicating it.
 */
static void
free_vbdev(struct spdk_bdev *bdev)
{
	free_bdev(bdev);
}
368 
/*
 * Completion callback for spdk_bdev_get_device_stat(): verifies the bdev,
 * releases the stat buffer and the bdev, then signals the waiting test
 * through the bool pointed to by cb_arg.
 */
static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	/* The callback owns the bdev from here; tear it down. */
	free_bdev(bdev);

	/* Signal get_device_stat_test() that the callback ran. */
	*(bool *)cb_arg = true;
}
384 
385 static void
386 get_device_stat_test(void)
387 {
388 	struct spdk_bdev *bdev;
389 	struct spdk_bdev_io_stat *stat;
390 	bool done;
391 
392 	bdev = allocate_bdev("bdev0");
393 	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
394 	if (stat == NULL) {
395 		free_bdev(bdev);
396 		return;
397 	}
398 
399 	done = false;
400 	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
401 	while (!done) { poll_threads(); }
402 
403 
404 }
405 
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7). This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |           \  |  /         \
	 *      bdev0   bdev1          bdev2         bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}
535 
/* Verify byte->block conversion for both power-of-two and non-power-of-two block sizes. */
static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen not the power of two */
	bdev.blocklen = 100;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}
571 
572 static void
573 num_blocks_test(void)
574 {
575 	struct spdk_bdev bdev;
576 	struct spdk_bdev_desc *desc = NULL;
577 	int rc;
578 
579 	memset(&bdev, 0, sizeof(bdev));
580 	bdev.name = "num_blocks";
581 	bdev.fn_table = &fn_table;
582 	bdev.module = &bdev_ut_if;
583 	spdk_bdev_register(&bdev);
584 	spdk_bdev_notify_blockcnt_change(&bdev, 50);
585 
586 	/* Growing block number */
587 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
588 	/* Shrinking block number */
589 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);
590 
591 	/* In case bdev opened */
592 	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
593 	CU_ASSERT(rc == 0);
594 	SPDK_CU_ASSERT_FATAL(desc != NULL);
595 
596 	/* Growing block number */
597 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
598 	/* Shrinking block number */
599 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);
600 
601 	spdk_bdev_close(desc);
602 	spdk_bdev_unregister(&bdev, NULL, NULL);
603 
604 	poll_threads();
605 }
606 
/* Verify offset/length range validation against a 100-block bdev. */
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) - must not overflow internally */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}
632 
/* Exercise alias add/delete rules: uniqueness across bdevs, name collisions, bulk delete. */
static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Trying adding an alias identical to name.
	 * Alias is identical to name, so it can not be added to aliases list
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Trying to add empty alias,
	 * this one should fail
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Trying adding same alias to two different registered bdevs */

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying removing an alias from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Trying to del all alias from empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Trying to del all alias from non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
720 
/* Generic I/O completion callback: record status in globals and release the I/O. */
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}
728 
/* Completion callback for spdk_bdev_initialize(); init must succeed. */
static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}
734 
/* Completion callback for spdk_bdev_finish(); nothing to verify. */
static void
bdev_fini_cb(void *arg)
{
}
739 
/* Wraps an spdk_bdev_io_wait_entry with the context io_wait_cb() needs to resubmit. */
struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;	/* set once the retried read is submitted */
};
746 
/* io_wait callback: retry the read that previously failed with -ENOMEM. */
static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}
757 
/*
 * Exhaust a 4-entry bdev_io pool, verify the next submit fails with -ENOMEM,
 * then check that queued io_wait entries are invoked (in order) as pool
 * entries are returned by completing outstanding I/O.
 */
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Fill the entire bdev_io pool with outstanding reads. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	/* Pool is exhausted, so the fifth submit must fail. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	/* Each completion frees one pool entry, triggering one queued wait in FIFO order. */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
835 
836 static void
837 bdev_io_spans_boundary_test(void)
838 {
839 	struct spdk_bdev bdev;
840 	struct spdk_bdev_io bdev_io;
841 
842 	memset(&bdev, 0, sizeof(bdev));
843 
844 	bdev.optimal_io_boundary = 0;
845 	bdev_io.bdev = &bdev;
846 
847 	/* bdev has no optimal_io_boundary set - so this should return false. */
848 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
849 
850 	bdev.optimal_io_boundary = 32;
851 	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;
852 
853 	/* RESETs are not based on LBAs - so this should return false. */
854 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
855 
856 	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
857 	bdev_io.u.bdev.offset_blocks = 0;
858 	bdev_io.u.bdev.num_blocks = 32;
859 
860 	/* This I/O run right up to, but does not cross, the boundary - so this should return false. */
861 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
862 
863 	bdev_io.u.bdev.num_blocks = 33;
864 
865 	/* This I/O spans a boundary. */
866 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == true);
867 }
868 
869 static void
870 bdev_io_split(void)
871 {
872 	struct spdk_bdev *bdev;
873 	struct spdk_bdev_desc *desc = NULL;
874 	struct spdk_io_channel *io_ch;
875 	struct spdk_bdev_opts bdev_opts = {
876 		.bdev_io_pool_size = 512,
877 		.bdev_io_cache_size = 64,
878 	};
879 	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
880 	struct ut_expected_io *expected_io;
881 	uint64_t i;
882 	int rc;
883 
884 	rc = spdk_bdev_set_opts(&bdev_opts);
885 	CU_ASSERT(rc == 0);
886 	spdk_bdev_initialize(bdev_init_cb, NULL);
887 
888 	bdev = allocate_bdev("bdev0");
889 
890 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
891 	CU_ASSERT(rc == 0);
892 	SPDK_CU_ASSERT_FATAL(desc != NULL);
893 	io_ch = spdk_bdev_get_io_channel(desc);
894 	CU_ASSERT(io_ch != NULL);
895 
896 	bdev->optimal_io_boundary = 16;
897 	bdev->split_on_optimal_io_boundary = false;
898 
899 	g_io_done = false;
900 
901 	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
902 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
903 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
904 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
905 
906 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
907 	CU_ASSERT(rc == 0);
908 	CU_ASSERT(g_io_done == false);
909 
910 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
911 	stub_complete_io(1);
912 	CU_ASSERT(g_io_done == true);
913 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
914 
915 	bdev->split_on_optimal_io_boundary = true;
916 
917 	/* Now test that a single-vector command is split correctly.
918 	 * Offset 14, length 8, payload 0xF000
919 	 *  Child - Offset 14, length 2, payload 0xF000
920 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
921 	 *
922 	 * Set up the expected values before calling spdk_bdev_read_blocks
923 	 */
924 	g_io_done = false;
925 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
926 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
927 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
928 
929 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
930 	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
931 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
932 
933 	/* spdk_bdev_read_blocks will submit the first child immediately. */
934 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
935 	CU_ASSERT(rc == 0);
936 	CU_ASSERT(g_io_done == false);
937 
938 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
939 	stub_complete_io(2);
940 	CU_ASSERT(g_io_done == true);
941 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
942 
943 	/* Now set up a more complex, multi-vector command that needs to be split,
944 	 *  including splitting iovecs.
945 	 */
946 	iov[0].iov_base = (void *)0x10000;
947 	iov[0].iov_len = 512;
948 	iov[1].iov_base = (void *)0x20000;
949 	iov[1].iov_len = 20 * 512;
950 	iov[2].iov_base = (void *)0x30000;
951 	iov[2].iov_len = 11 * 512;
952 
953 	g_io_done = false;
954 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
955 	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
956 	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
957 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
958 
959 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
960 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
961 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
962 
963 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
964 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
965 	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
966 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
967 
968 	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
969 	CU_ASSERT(rc == 0);
970 	CU_ASSERT(g_io_done == false);
971 
972 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
973 	stub_complete_io(3);
974 	CU_ASSERT(g_io_done == true);
975 
976 	/* Test multi vector command that needs to be split by strip and then needs to be
977 	 * split further due to the capacity of child iovs.
978 	 */
979 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
980 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
981 		iov[i].iov_len = 512;
982 	}
983 
984 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
985 	g_io_done = false;
986 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
987 					   BDEV_IO_NUM_CHILD_IOV);
988 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
989 		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
990 	}
991 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
992 
993 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
994 					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
995 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
996 		ut_expected_io_set_iov(expected_io, i,
997 				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
998 	}
999 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1000 
1001 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1002 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1003 	CU_ASSERT(rc == 0);
1004 	CU_ASSERT(g_io_done == false);
1005 
1006 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1007 	stub_complete_io(1);
1008 	CU_ASSERT(g_io_done == false);
1009 
1010 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1011 	stub_complete_io(1);
1012 	CU_ASSERT(g_io_done == true);
1013 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1014 
1015 	/* Test multi vector command that needs to be split by strip and then needs to be
1016 	 * split further due to the capacity of child iovs, but fails to split. The cause
1017 	 * of failure of split is that the length of an iovec is not multiple of block size.
1018 	 */
1019 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1020 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
1021 		iov[i].iov_len = 512;
1022 	}
1023 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
1024 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1025 
1026 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
1027 	g_io_done = false;
1028 	g_io_status = 0;
1029 
1030 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
1031 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1032 	CU_ASSERT(rc == 0);
1033 	CU_ASSERT(g_io_done == true);
1034 	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1035 
1036 	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
1037 	 * split, so test that.
1038 	 */
1039 	bdev->optimal_io_boundary = 15;
1040 	g_io_done = false;
1041 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1042 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1043 
1044 	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1045 	CU_ASSERT(rc == 0);
1046 	CU_ASSERT(g_io_done == false);
1047 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1048 	stub_complete_io(1);
1049 	CU_ASSERT(g_io_done == true);
1050 
1051 	/* Test an UNMAP.  This should also not be split. */
1052 	bdev->optimal_io_boundary = 16;
1053 	g_io_done = false;
1054 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1055 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1056 
1057 	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1058 	CU_ASSERT(rc == 0);
1059 	CU_ASSERT(g_io_done == false);
1060 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1061 	stub_complete_io(1);
1062 	CU_ASSERT(g_io_done == true);
1063 
1064 	/* Test a FLUSH.  This should also not be split. */
1065 	bdev->optimal_io_boundary = 16;
1066 	g_io_done = false;
1067 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1068 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1069 
1070 	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1071 	CU_ASSERT(rc == 0);
1072 	CU_ASSERT(g_io_done == false);
1073 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1074 	stub_complete_io(1);
1075 	CU_ASSERT(g_io_done == true);
1076 
1077 	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1078 
1079 	spdk_put_io_channel(io_ch);
1080 	spdk_bdev_close(desc);
1081 	free_bdev(bdev);
1082 	spdk_bdev_finish(bdev_fini_cb, NULL);
1083 	poll_threads();
1084 }
1085 
/*
 * Verify that I/O splitting makes forward progress when the spdk_bdev_io
 * pool is nearly exhausted (bdev_io_pool_size = 2, bdev_io_cache_size = 1):
 * child I/Os that cannot allocate an spdk_bdev_io are queued on the
 * management channel's io_wait_queue and get submitted one at a time as
 * previously outstanding I/Os complete.
 */
static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	/* Deliberately tiny pool so split children must wait for free spdk_bdev_ios. */
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 2,
		.bdev_io_cache_size = 1,
	};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Submit an I/O first to occupy one of the spdk_bdev_ios in the small pool. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O.  This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 *  including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* All queued expectations must have been consumed by the stub. */
	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1220 
/*
 * Exercise bdev->required_alignment handling.  I/O payloads that already
 * satisfy the required alignment must be passed through unchanged
 * (orig_iovcnt == 0, original iov_base preserved), while unaligned payloads
 * must be redirected through the internal bounce buffer
 * (u.bdev.iovs == &internal.bounce_iov, orig_iovcnt saved) and restored on
 * completion (orig_iovcnt back to 0 after stub_complete_io).
 */
static void
bdev_io_alignment(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 20,
		.bdev_io_cache_size = 2,
	};
	int rc;
	void *buf;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	/* Use the stub that records the submitted iovs so alignment can be checked. */
	fn_table.submit_request = stub_submit_request_aligned_buffer;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 8192);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Pass aligned single buffer with no alignment required */
	alignment = 1;
	/* required_alignment is stored as a power-of-two exponent. */
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	/* Pass unaligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	/* No bounce buffer expected: original pointer must reach the stub untouched. */
	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	/* Pass unaligned single buffer with 512 alignment required */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);

	/* Bounce buffer expected: original iov is saved and bounce_iov is submitted. */
	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iov with 2048 alignment required */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	/* Both iovs are collapsed into the single bounce iov; all originals saved. */
	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass iov without allocated buffer without alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	/* NULL payload: the bdev layer allocates the buffer itself, no bounce needed. */
	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass iov without allocated buffer with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}
1438 
/* Completion callback for histogram enable/disable; records the status in
 * the global g_status for the test to assert on after poll_threads().
 */
static void
histogram_status_cb(void *cb_arg, int status)
{
	g_status = status;
}
1444 
/* Completion callback for spdk_bdev_histogram_get(); saves both the status
 * and the resulting histogram pointer in globals for later assertions.
 */
static void
histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
{
	g_status = status;
	g_histogram = histogram;
}
1451 
/* Per-bucket iterator callback for spdk_histogram_data_iterate(); accumulates
 * the bucket counts into g_count to obtain the total number of recorded I/Os.
 */
static void
histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
		   uint64_t total, uint64_t so_far)
{
	g_count += count;
}
1458 
/*
 * Verify per-bdev latency histograms: a freshly enabled histogram starts
 * zeroed, completed I/Os are counted (two I/Os -> g_count == 2), and once
 * histograms are disabled spdk_bdev_histogram_get() reports -EFAULT.
 */
static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);

	ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(ch != NULL);

	/* Enable histogram */
	g_status = -1;
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);

	/* Allocate histogram */
	histogram = spdk_histogram_data_alloc();
	SPDK_CU_ASSERT_FATAL(histogram != NULL);

	/* Check if histogram is zeroed */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);

	CU_ASSERT(g_count == 0);

	rc = spdk_bdev_write_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Advance time so the completed I/O records a non-zero latency. */
	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	rc = spdk_bdev_read_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	/* Check if histogram gathered data from all I/O channels */
	g_histogram = NULL;
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
	CU_ASSERT(g_count == 2);

	/* Disable histogram */
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == false);

	/* Try to run histogram commands on disabled bdev */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == -EFAULT);

	spdk_histogram_data_free(g_histogram);
	spdk_put_io_channel(ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
1546 
1547 int
1548 main(int argc, char **argv)
1549 {
1550 	CU_pSuite		suite = NULL;
1551 	unsigned int		num_failures;
1552 
1553 	if (CU_initialize_registry() != CUE_SUCCESS) {
1554 		return CU_get_error();
1555 	}
1556 
1557 	suite = CU_add_suite("bdev", null_init, null_clean);
1558 	if (suite == NULL) {
1559 		CU_cleanup_registry();
1560 		return CU_get_error();
1561 	}
1562 
1563 	if (
1564 		CU_add_test(suite, "bytes_to_blocks_test", bytes_to_blocks_test) == NULL ||
1565 		CU_add_test(suite, "num_blocks_test", num_blocks_test) == NULL ||
1566 		CU_add_test(suite, "io_valid", io_valid_test) == NULL ||
1567 		CU_add_test(suite, "open_write", open_write_test) == NULL ||
1568 		CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
1569 		CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
1570 		CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
1571 		CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
1572 		CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
1573 		CU_add_test(suite, "bdev_io_split_with_io_wait", bdev_io_split_with_io_wait) == NULL ||
1574 		CU_add_test(suite, "bdev_io_alignment", bdev_io_alignment) == NULL ||
1575 		CU_add_test(suite, "bdev_histograms", bdev_histograms) == NULL
1576 	) {
1577 		CU_cleanup_registry();
1578 		return CU_get_error();
1579 	}
1580 
1581 	allocate_threads(1);
1582 	set_thread(0);
1583 
1584 	CU_basic_set_mode(CU_BRM_VERBOSE);
1585 	CU_basic_run_tests();
1586 	num_failures = CU_get_number_of_failures();
1587 	CU_cleanup_registry();
1588 
1589 	free_threads();
1590 
1591 	return num_failures;
1592 }
1593