xref: /spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c (revision 3c9815082103a34b36844805b678eb31249a8616)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/test_env.c"
37 #include "unit/lib/json_mock.c"
38 
39 #include "spdk/config.h"
40 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
41 #undef SPDK_CONFIG_VTUNE
42 
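/*
 * Include bdev.c directly so its internal (static) functions and data
 * structures - e.g. _spdk_bdev_io_should_split() - can be exercised by
 * these tests without exposing them in public headers.
 */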
43 #include "bdev/bdev.c"
44 
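/*
 * bdev.c references conf and trace functionality that these tests never
 * exercise; the DEFINE_STUB*() helpers below generate minimal no-op
 * implementations (with fixed return values) to satisfy the linker.
 */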
45 DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
46 		const char *name), NULL);
47 DEFINE_STUB(spdk_conf_section_get_nmval, char *,
48 	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
49 DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
50 
51 struct spdk_trace_histories *g_trace_histories;
52 DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
53 DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
54 DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
55 DEFINE_STUB_V(spdk_trace_register_description, (const char *name, const char *short_name,
56 		uint16_t tpoint_id, uint8_t owner_type,
57 		uint8_t object_type, uint8_t new_object,
58 		uint8_t arg1_is_ptr, const char *arg1_name));
59 DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
60 				   uint32_t size, uint64_t object_id, uint64_t arg1));
61 
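/*
 * Message-passing function handed to spdk_allocate_thread() in main().  It
 * runs each message synchronously on the calling thread, which keeps these
 * unit tests single-threaded and deterministic.
 */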
62 static void
63 _bdev_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
64 {
65 	fn(ctx);
66 }
67 
68 void
69 spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
70 			 int *sc, int *sk, int *asc, int *ascq)
71 {
72 }
73 
74 static int
75 null_init(void)
76 {
77 	return 0;
78 }
79 
80 static int
81 null_clean(void)
82 {
83 	return 0;
84 }
85 
86 static int
87 stub_destruct(void *ctx)
88 {
89 	return 0;
90 }
91 
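/*
 * Tests describe the child I/O they expect (type, offset, length and iovs)
 * by queueing ut_expected_io entries on the channel; stub_submit_request()
 * pops the next entry and asserts that the submitted bdev_io matches it.
 * Typical usage, taken from the split tests below:
 *
 *   expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
 *   ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
 *   TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 */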
92 struct ut_expected_io {
93 	uint8_t				type;
94 	uint64_t			offset;
95 	uint64_t			length;
96 	int				iovcnt;
97 	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
98 	TAILQ_ENTRY(ut_expected_io)	link;
99 };
100 
101 struct bdev_ut_channel {
102 	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
103 	uint32_t			outstanding_io_count;
104 	TAILQ_HEAD(, ut_expected_io)	expected_io;
105 };
106 
107 static bool g_io_done;
108 static struct spdk_bdev_io *g_bdev_io;
109 static enum spdk_bdev_io_status g_io_status;
110 static uint32_t g_bdev_ut_io_device;
111 static struct bdev_ut_channel *g_bdev_ut_channel;
112 
113 static struct ut_expected_io *
114 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
115 {
116 	struct ut_expected_io *expected_io;
117 
118 	expected_io = calloc(1, sizeof(*expected_io));
119 	SPDK_CU_ASSERT_FATAL(expected_io != NULL);
120 
121 	expected_io->type = type;
122 	expected_io->offset = offset;
123 	expected_io->length = length;
124 	expected_io->iovcnt = iovcnt;
125 
126 	return expected_io;
127 }
128 
129 static void
130 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
131 {
132 	expected_io->iov[pos].iov_base = base;
133 	expected_io->iov[pos].iov_len = len;
134 }
135 
136 static void
137 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
138 {
139 	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
140 	struct ut_expected_io *expected_io;
141 	struct iovec *iov, *expected_iov;
142 	int i;
143 
144 	g_bdev_io = bdev_io;
145 
146 	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
147 	ch->outstanding_io_count++;
148 
149 	expected_io = TAILQ_FIRST(&ch->expected_io);
150 	if (expected_io == NULL) {
151 		return;
152 	}
153 	TAILQ_REMOVE(&ch->expected_io, expected_io, link);
154 
155 	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
156 		CU_ASSERT(bdev_io->type == expected_io->type);
157 	}
158 
159 	if (expected_io->length == 0) {
160 		free(expected_io);
161 		return;
162 	}
163 
164 	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
165 	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);
166 
167 	if (expected_io->iovcnt == 0) {
168 		free(expected_io);
169 		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
170 		return;
171 	}
172 
173 	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
174 	for (i = 0; i < expected_io->iovcnt; i++) {
175 		iov = &bdev_io->u.bdev.iovs[i];
176 		expected_iov = &expected_io->iov[i];
177 		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
178 		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
179 	}
180 
181 	free(expected_io);
182 }
183 
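/*
 * Submit handler used by the alignment test: route the request through
 * spdk_bdev_io_get_buf() so the generic bdev layer performs buffer
 * allocation and bounce-buffer handling (honoring bdev->required_alignment)
 * before the I/O reaches stub_submit_request().
 */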
184 static void
185 stub_submit_request_aligned_buffer(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
186 {
187 	spdk_bdev_io_get_buf(bdev_io, stub_submit_request,
188 			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
189 }
190 
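/*
 * Complete up to num_to_complete outstanding I/Os (oldest first) with
 * SPDK_BDEV_IO_STATUS_SUCCESS and return how many were actually completed.
 */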
191 static uint32_t
192 stub_complete_io(uint32_t num_to_complete)
193 {
194 	struct bdev_ut_channel *ch = g_bdev_ut_channel;
195 	struct spdk_bdev_io *bdev_io;
196 	uint32_t num_completed = 0;
197 
198 	while (num_completed < num_to_complete) {
199 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
200 			break;
201 		}
202 		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
203 		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
204 		ch->outstanding_io_count--;
205 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
206 		num_completed++;
207 	}
208 
209 	return num_completed;
210 }
211 
212 static struct spdk_io_channel *
213 bdev_ut_get_io_channel(void *ctx)
214 {
215 	return spdk_get_io_channel(&g_bdev_ut_io_device);
216 }
217 
218 static bool
219 stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
220 {
221 	return true;
222 }
223 
224 static struct spdk_bdev_fn_table fn_table = {
225 	.destruct = stub_destruct,
226 	.submit_request = stub_submit_request,
227 	.get_io_channel = bdev_ut_get_io_channel,
228 	.io_type_supported = stub_io_type_supported,
229 };
230 
231 static int
232 bdev_ut_create_ch(void *io_device, void *ctx_buf)
233 {
234 	struct bdev_ut_channel *ch = ctx_buf;
235 
236 	CU_ASSERT(g_bdev_ut_channel == NULL);
237 	g_bdev_ut_channel = ch;
238 
239 	TAILQ_INIT(&ch->outstanding_io);
240 	ch->outstanding_io_count = 0;
241 	TAILQ_INIT(&ch->expected_io);
242 	return 0;
243 }
244 
245 static void
246 bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
247 {
248 	CU_ASSERT(g_bdev_ut_channel != NULL);
249 	g_bdev_ut_channel = NULL;
250 }
251 
252 static int
253 bdev_ut_module_init(void)
254 {
255 	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
256 				sizeof(struct bdev_ut_channel), NULL);
257 	return 0;
258 }
259 
260 static void
261 bdev_ut_module_fini(void)
262 {
263 	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
264 }
265 
266 struct spdk_bdev_module bdev_ut_if = {
267 	.name = "bdev_ut",
268 	.module_init = bdev_ut_module_init,
269 	.module_fini = bdev_ut_module_fini,
270 };
271 
272 static void vbdev_ut_examine(struct spdk_bdev *bdev);
273 
274 static int
275 vbdev_ut_module_init(void)
276 {
277 	return 0;
278 }
279 
280 static void
281 vbdev_ut_module_fini(void)
282 {
283 }
284 
285 struct spdk_bdev_module vbdev_ut_if = {
286 	.name = "vbdev_ut",
287 	.module_init = vbdev_ut_module_init,
288 	.module_fini = vbdev_ut_module_fini,
289 	.examine_config = vbdev_ut_examine,
290 };
291 
292 SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)
293 SPDK_BDEV_MODULE_REGISTER(&vbdev_ut_if)
294 
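/* Claim nothing during examine; just report completion so that registration
 * of the examined bdev is not blocked on this module.
 */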
295 static void
296 vbdev_ut_examine(struct spdk_bdev *bdev)
297 {
298 	spdk_bdev_module_examine_done(&vbdev_ut_if);
299 }
300 
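/* Register a simple bdev with 1024 blocks of 512 bytes, backed by the stub
 * fn_table.  Individual tests adjust fields such as optimal_io_boundary or
 * required_alignment after calling this.
 */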
301 static struct spdk_bdev *
302 allocate_bdev(char *name)
303 {
304 	struct spdk_bdev *bdev;
305 	int rc;
306 
307 	bdev = calloc(1, sizeof(*bdev));
308 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
309 
310 	bdev->name = name;
311 	bdev->fn_table = &fn_table;
312 	bdev->module = &bdev_ut_if;
313 	bdev->blockcnt = 1024;
314 	bdev->blocklen = 512;
315 
316 	rc = spdk_bdev_register(bdev);
317 	CU_ASSERT(rc == 0);
318 
319 	return bdev;
320 }
321 
322 static struct spdk_bdev *
323 allocate_vbdev(char *name, struct spdk_bdev *base1, struct spdk_bdev *base2)
324 {
325 	struct spdk_bdev *bdev;
326 	struct spdk_bdev *array[2];
327 	int rc;
328 
329 	bdev = calloc(1, sizeof(*bdev));
330 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
331 
332 	bdev->name = name;
333 	bdev->fn_table = &fn_table;
334 	bdev->module = &vbdev_ut_if;
335 
336 	/* vbdev must have at least one base bdev */
337 	CU_ASSERT(base1 != NULL);
338 
339 	array[0] = base1;
340 	array[1] = base2;
341 
342 	rc = spdk_vbdev_register(bdev, array, base2 == NULL ? 1 : 2);
343 	CU_ASSERT(rc == 0);
344 
345 	return bdev;
346 }
347 
348 static void
349 free_bdev(struct spdk_bdev *bdev)
350 {
351 	spdk_bdev_unregister(bdev, NULL, NULL);
352 	memset(bdev, 0xFF, sizeof(*bdev));
353 	free(bdev);
354 }
355 
356 static void
357 free_vbdev(struct spdk_bdev *bdev)
358 {
359 	spdk_bdev_unregister(bdev, NULL, NULL);
360 	memset(bdev, 0xFF, sizeof(*bdev));
361 	free(bdev);
362 }
363 
364 static void
365 get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
366 {
367 	const char *bdev_name;
368 
369 	CU_ASSERT(bdev != NULL);
370 	CU_ASSERT(rc == 0);
371 	bdev_name = spdk_bdev_get_name(bdev);
372 	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");
373 
374 	free(stat);
375 	free_bdev(bdev);
376 }
377 
378 static void
379 get_device_stat_test(void)
380 {
381 	struct spdk_bdev *bdev;
382 	struct spdk_bdev_io_stat *stat;
383 
384 	bdev = allocate_bdev("bdev0");
385 	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
386 	if (stat == NULL) {
387 		free_bdev(bdev);
388 		return;
389 	}
390 	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, NULL);
391 }
392 
393 static void
394 open_write_test(void)
395 {
396 	struct spdk_bdev *bdev[9];
397 	struct spdk_bdev_desc *desc[9] = {};
398 	int rc;
399 
400 	/*
401 	 * Create a tree of bdevs to test various open w/ write cases.
402 	 *
403 	 * bdev0 through bdev3 are physical block devices, such as NVMe
404 	 * namespaces or Ceph block devices.
405 	 *
406 	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
407 	 * caching or RAID use cases.
408 	 *
409 	 * bdev5 through bdev7 are virtual bdevs that all share bdev2 as a
410 	 * base bdev; bdev5 and bdev6 use it as their only base.  This models
411 	 * partitioning or logical volume use cases.
412 	 *
413 	 * bdev7 is a virtual bdev with multiple base bdevs.  One of its base
414 	 * bdevs (bdev2) is shared with other virtual bdevs: bdev5 and bdev6.
415 	 * This models caching, RAID, partitioning or logical volume use cases.
416 	 *
417 	 * bdev8 is a virtual bdev with multiple base bdevs, but these
418 	 * base bdevs are themselves virtual bdevs.
419 	 *
420 	 *                bdev8
421 	 *                  |
422 	 *            +----------+
423 	 *            |          |
424 	 *          bdev4      bdev5   bdev6   bdev7
425 	 *            |          |       |       |
426 	 *        +---+---+      +---+   +   +---+---+
427 	 *        |       |           \  |  /         \
428 	 *      bdev0   bdev1          bdev2         bdev3
429 	 */
430 
431 	bdev[0] = allocate_bdev("bdev0");
432 	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
433 	CU_ASSERT(rc == 0);
434 
435 	bdev[1] = allocate_bdev("bdev1");
436 	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
437 	CU_ASSERT(rc == 0);
438 
439 	bdev[2] = allocate_bdev("bdev2");
440 	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
441 	CU_ASSERT(rc == 0);
442 
443 	bdev[3] = allocate_bdev("bdev3");
444 	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
445 	CU_ASSERT(rc == 0);
446 
447 	bdev[4] = allocate_vbdev("bdev4", bdev[0], bdev[1]);
448 	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
449 	CU_ASSERT(rc == 0);
450 
451 	bdev[5] = allocate_vbdev("bdev5", bdev[2], NULL);
452 	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
453 	CU_ASSERT(rc == 0);
454 
455 	bdev[6] = allocate_vbdev("bdev6", bdev[2], NULL);
456 
457 	bdev[7] = allocate_vbdev("bdev7", bdev[2], bdev[3]);
458 
459 	bdev[8] = allocate_vbdev("bdev8", bdev[4], bdev[5]);
460 
461 	/* Open bdev0 read-only.  This should succeed. */
462 	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
463 	CU_ASSERT(rc == 0);
464 	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
465 	spdk_bdev_close(desc[0]);
466 
467 	/*
468 	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
469 	 * by a vbdev module.
470 	 */
471 	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
472 	CU_ASSERT(rc == -EPERM);
473 
474 	/*
475 	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
476 	 * by a vbdev module.
477 	 */
478 	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
479 	CU_ASSERT(rc == -EPERM);
480 
481 	/* Open bdev4 read-only.  This should succeed. */
482 	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
483 	CU_ASSERT(rc == 0);
484 	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
485 	spdk_bdev_close(desc[4]);
486 
487 	/*
488 	 * Open bdev8 read/write.  This should succeed since no module has
489 	 * claimed bdev8 itself.
490 	 */
491 	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
492 	CU_ASSERT(rc == 0);
493 	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
494 	spdk_bdev_close(desc[8]);
495 
496 	/*
497 	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
498 	 * by a vbdev module.
499 	 */
500 	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
501 	CU_ASSERT(rc == -EPERM);
502 
503 	/* Open bdev5 read-only.  This should succeed. */
504 	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
505 	CU_ASSERT(rc == 0);
506 	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
507 	spdk_bdev_close(desc[5]);
508 
509 	free_vbdev(bdev[8]);
510 
511 	free_vbdev(bdev[5]);
512 	free_vbdev(bdev[6]);
513 	free_vbdev(bdev[7]);
514 
515 	free_vbdev(bdev[4]);
516 
517 	free_bdev(bdev[0]);
518 	free_bdev(bdev[1]);
519 	free_bdev(bdev[2]);
520 	free_bdev(bdev[3]);
521 }
522 
523 static void
524 bytes_to_blocks_test(void)
525 {
526 	struct spdk_bdev bdev;
527 	uint64_t offset_blocks, num_blocks;
528 
529 	memset(&bdev, 0, sizeof(bdev));
530 
531 	bdev.blocklen = 512;
532 
533 	/* All parameters valid */
534 	offset_blocks = 0;
535 	num_blocks = 0;
536 	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
537 	CU_ASSERT(offset_blocks == 1);
538 	CU_ASSERT(num_blocks == 2);
539 
540 	/* Offset not a block multiple */
541 	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);
542 
543 	/* Length not a block multiple */
544 	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);
545 }
546 
547 static void
548 num_blocks_test(void)
549 {
550 	struct spdk_bdev bdev;
551 	struct spdk_bdev_desc *desc = NULL;
552 	int rc;
553 
554 	memset(&bdev, 0, sizeof(bdev));
555 	bdev.name = "num_blocks";
556 	bdev.fn_table = &fn_table;
557 	bdev.module = &bdev_ut_if;
558 	spdk_bdev_register(&bdev);
559 	spdk_bdev_notify_blockcnt_change(&bdev, 50);
560 
561 	/* Growing block number */
562 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
563 	/* Shrinking block number */
564 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);
565 
566 	/* In case bdev opened */
567 	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
568 	CU_ASSERT(rc == 0);
569 	SPDK_CU_ASSERT_FATAL(desc != NULL);
570 
571 	/* Growing block number */
572 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
573 	/* Shrinking block number */
574 	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);
575 
576 	spdk_bdev_close(desc);
577 	spdk_bdev_unregister(&bdev, NULL, NULL);
578 }
579 
580 static void
581 io_valid_test(void)
582 {
583 	struct spdk_bdev bdev;
584 
585 	memset(&bdev, 0, sizeof(bdev));
586 
587 	bdev.blocklen = 512;
588 	spdk_bdev_notify_blockcnt_change(&bdev, 100);
589 
590 	/* All parameters valid */
591 	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 1, 2) == true);
592 
593 	/* Last valid block */
594 	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 1) == true);
595 
596 	/* Offset past end of bdev */
597 	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 100, 1) == false);
598 
599 	/* Offset + length past end of bdev */
600 	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 2) == false);
601 
602 	/* Offset near end of uint64_t range (2^64 - 1) */
603 	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
604 }
605 
606 static void
607 alias_add_del_test(void)
608 {
609 	struct spdk_bdev *bdev[3];
610 	int rc;
611 
612 	/* Creating and registering bdevs */
613 	bdev[0] = allocate_bdev("bdev0");
614 	SPDK_CU_ASSERT_FATAL(bdev[0] != NULL);
615 
616 	bdev[1] = allocate_bdev("bdev1");
617 	SPDK_CU_ASSERT_FATAL(bdev[1] != NULL);
618 
619 	bdev[2] = allocate_bdev("bdev2");
620 	SPDK_CU_ASSERT_FATAL(bdev[2] != NULL);
621 
622 	/*
623 	 * Try adding an alias identical to the bdev's name.
624 	 * Since the alias matches the name, it cannot be added to the aliases list.
625 	 */
626 	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
627 	CU_ASSERT(rc == -EEXIST);
628 
629 	/*
630 	 * Try to add a NULL alias;
631 	 * this should fail.
632 	 */
633 	rc = spdk_bdev_alias_add(bdev[0], NULL);
634 	CU_ASSERT(rc == -EINVAL);
635 
636 	/* Try adding the same alias to two different registered bdevs */
637 
638 	/* The alias is used for the first time, so this should pass */
639 	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
640 	CU_ASSERT(rc == 0);
641 
642 	/* The alias was already added to another bdev, so this should fail */
643 	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
644 	CU_ASSERT(rc == -EEXIST);
645 
646 	/* The alias is used for the first time, so this should pass */
647 	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
648 	CU_ASSERT(rc == 0);
649 
650 	/* Try removing aliases from registered bdevs */
651 
652 	/* The alias is not on the bdev's alias list, so this should fail */
653 	rc = spdk_bdev_alias_del(bdev[0], "not existing");
654 	CU_ASSERT(rc == -ENOENT);
655 
656 	/* The alias is present on the bdev's alias list, so this should pass */
657 	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
658 	CU_ASSERT(rc == 0);
659 
660 	/* The alias is present on the bdev's alias list, so this should pass */
661 	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
662 	CU_ASSERT(rc == 0);
663 
664 	/* Try to remove the name instead of an alias.  This should fail because the name cannot be changed or removed. */
665 	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
666 	CU_ASSERT(rc != 0);
667 
668 	/* Try deleting all aliases from an empty alias list */
669 	spdk_bdev_alias_del_all(bdev[2]);
670 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));
671 
672 	/* Try deleting all aliases from a non-empty alias list */
673 	rc = spdk_bdev_alias_add(bdev[2], "alias0");
674 	CU_ASSERT(rc == 0);
675 	rc = spdk_bdev_alias_add(bdev[2], "alias1");
676 	CU_ASSERT(rc == 0);
677 	spdk_bdev_alias_del_all(bdev[2]);
678 	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));
679 
680 	/* Unregister and free bdevs */
681 	spdk_bdev_unregister(bdev[0], NULL, NULL);
682 	spdk_bdev_unregister(bdev[1], NULL, NULL);
683 	spdk_bdev_unregister(bdev[2], NULL, NULL);
684 
685 	free(bdev[0]);
686 	free(bdev[1]);
687 	free(bdev[2]);
688 }
689 
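/* Generic completion callback: record that the I/O finished and with what
 * status, then return the bdev_io to the pool.
 */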
690 static void
691 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
692 {
693 	g_io_done = true;
694 	g_io_status = bdev_io->internal.status;
695 	spdk_bdev_free_io(bdev_io);
696 }
697 
698 static void
699 bdev_init_cb(void *arg, int rc)
700 {
701 	CU_ASSERT(rc == 0);
702 }
703 
704 static void
705 bdev_fini_cb(void *arg)
706 {
707 }
708 
709 struct bdev_ut_io_wait_entry {
710 	struct spdk_bdev_io_wait_entry	entry;
711 	struct spdk_io_channel		*io_ch;
712 	struct spdk_bdev_desc		*desc;
713 	bool				submitted;
714 };
715 
716 static void
717 io_wait_cb(void *arg)
718 {
719 	struct bdev_ut_io_wait_entry *entry = arg;
720 	int rc;
721 
722 	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
723 	CU_ASSERT(rc == 0);
724 	entry->submitted = true;
725 }
726 
727 static void
728 bdev_io_wait_test(void)
729 {
730 	struct spdk_bdev *bdev;
731 	struct spdk_bdev_desc *desc = NULL;
732 	struct spdk_io_channel *io_ch;
733 	struct spdk_bdev_opts bdev_opts = {
734 		.bdev_io_pool_size = 4,
735 		.bdev_io_cache_size = 2,
736 	};
737 	struct bdev_ut_io_wait_entry io_wait_entry;
738 	struct bdev_ut_io_wait_entry io_wait_entry2;
739 	int rc;
740 
741 	rc = spdk_bdev_set_opts(&bdev_opts);
742 	CU_ASSERT(rc == 0);
743 	spdk_bdev_initialize(bdev_init_cb, NULL);
744 
745 	bdev = allocate_bdev("bdev0");
746 
747 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
748 	CU_ASSERT(rc == 0);
749 	SPDK_CU_ASSERT_FATAL(desc != NULL);
750 	io_ch = spdk_bdev_get_io_channel(desc);
751 	CU_ASSERT(io_ch != NULL);
752 
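	/*
	 * bdev_io_pool_size is 4, so the four reads below consume the entire
	 * spdk_bdev_io pool.  The fifth submission is then expected to fail
	 * with -ENOMEM, which is the case spdk_bdev_queue_io_wait() handles.
	 */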
753 	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
754 	CU_ASSERT(rc == 0);
755 	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
756 	CU_ASSERT(rc == 0);
757 	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
758 	CU_ASSERT(rc == 0);
759 	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
760 	CU_ASSERT(rc == 0);
761 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
762 
763 	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
764 	CU_ASSERT(rc == -ENOMEM);
765 
766 	io_wait_entry.entry.bdev = bdev;
767 	io_wait_entry.entry.cb_fn = io_wait_cb;
768 	io_wait_entry.entry.cb_arg = &io_wait_entry;
769 	io_wait_entry.io_ch = io_ch;
770 	io_wait_entry.desc = desc;
771 	io_wait_entry.submitted = false;
772 	/* Cannot use the same io_wait_entry for two different calls. */
773 	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
774 	io_wait_entry2.entry.cb_arg = &io_wait_entry2;
775 
776 	/* Queue two I/O waits. */
777 	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
778 	CU_ASSERT(rc == 0);
779 	CU_ASSERT(io_wait_entry.submitted == false);
780 	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
781 	CU_ASSERT(rc == 0);
782 	CU_ASSERT(io_wait_entry2.submitted == false);
783 
784 	stub_complete_io(1);
785 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
786 	CU_ASSERT(io_wait_entry.submitted == true);
787 	CU_ASSERT(io_wait_entry2.submitted == false);
788 
789 	stub_complete_io(1);
790 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
791 	CU_ASSERT(io_wait_entry2.submitted == true);
792 
793 	stub_complete_io(4);
794 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
795 
796 	spdk_put_io_channel(io_ch);
797 	spdk_bdev_close(desc);
798 	free_bdev(bdev);
799 	spdk_bdev_finish(bdev_fini_cb, NULL);
800 }
801 
802 static void
803 bdev_io_spans_boundary_test(void)
804 {
805 	struct spdk_bdev bdev;
806 	struct spdk_bdev_io bdev_io;
807 
808 	memset(&bdev, 0, sizeof(bdev));
809 
810 	bdev.optimal_io_boundary = 0;
811 	bdev_io.bdev = &bdev;
812 
813 	/* bdev has no optimal_io_boundary set - so this should return false. */
814 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
815 
816 	bdev.optimal_io_boundary = 32;
817 	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;
818 
819 	/* RESETs are not based on LBAs - so this should return false. */
820 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
821 
822 	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
823 	bdev_io.u.bdev.offset_blocks = 0;
824 	bdev_io.u.bdev.num_blocks = 32;
825 
826 	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
827 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
828 
829 	bdev_io.u.bdev.num_blocks = 33;
830 
831 	/* This I/O spans a boundary. */
832 	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == true);
833 }
834 
835 static void
836 bdev_io_split(void)
837 {
838 	struct spdk_bdev *bdev;
839 	struct spdk_bdev_desc *desc = NULL;
840 	struct spdk_io_channel *io_ch;
841 	struct spdk_bdev_opts bdev_opts = {
842 		.bdev_io_pool_size = 512,
843 		.bdev_io_cache_size = 64,
844 	};
845 	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
846 	struct ut_expected_io *expected_io;
847 	uint64_t i;
848 	int rc;
849 
850 	rc = spdk_bdev_set_opts(&bdev_opts);
851 	CU_ASSERT(rc == 0);
852 	spdk_bdev_initialize(bdev_init_cb, NULL);
853 
854 	bdev = allocate_bdev("bdev0");
855 
856 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
857 	CU_ASSERT(rc == 0);
858 	SPDK_CU_ASSERT_FATAL(desc != NULL);
859 	io_ch = spdk_bdev_get_io_channel(desc);
860 	CU_ASSERT(io_ch != NULL);
861 
862 	bdev->optimal_io_boundary = 16;
863 	bdev->split_on_optimal_io_boundary = false;
864 
865 	g_io_done = false;
866 
867 	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
868 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
869 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
870 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
871 
872 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
873 	CU_ASSERT(rc == 0);
874 	CU_ASSERT(g_io_done == false);
875 
876 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
877 	stub_complete_io(1);
878 	CU_ASSERT(g_io_done == true);
879 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
880 
881 	bdev->split_on_optimal_io_boundary = true;
882 
883 	/* Now test that a single-vector command is split correctly.
884 	 * Offset 14, length 8, payload 0xF000
885 	 *  Child - Offset 14, length 2, payload 0xF000
886 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
887 	 *
888 	 * Set up the expected values before calling spdk_bdev_read_blocks
889 	 */
890 	g_io_done = false;
891 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
892 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
893 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
894 
895 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
896 	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
897 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
898 
899 	/* spdk_bdev_read_blocks will submit both children immediately. */
900 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
901 	CU_ASSERT(rc == 0);
902 	CU_ASSERT(g_io_done == false);
903 
904 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
905 	stub_complete_io(2);
906 	CU_ASSERT(g_io_done == true);
907 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
908 
909 	/* Now set up a more complex, multi-vector command that needs to be split,
910 	 *  including splitting iovecs.
911 	 */
912 	iov[0].iov_base = (void *)0x10000;
913 	iov[0].iov_len = 512;
914 	iov[1].iov_base = (void *)0x20000;
915 	iov[1].iov_len = 20 * 512;
916 	iov[2].iov_base = (void *)0x30000;
917 	iov[2].iov_len = 11 * 512;
918 
919 	g_io_done = false;
920 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
921 	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
922 	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
923 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
924 
925 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
926 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
927 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
928 
929 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
930 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
931 	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
932 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
933 
934 	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
935 	CU_ASSERT(rc == 0);
936 	CU_ASSERT(g_io_done == false);
937 
938 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
939 	stub_complete_io(3);
940 	CU_ASSERT(g_io_done == true);
941 
942 	/* Test a multi-vector command that needs to be split on the optimal I/O boundary
943 	 * and then split further due to the capacity of child iovs.
944 	 */
945 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
946 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
947 		iov[i].iov_len = 512;
948 	}
949 
950 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
951 	g_io_done = false;
952 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
953 					   BDEV_IO_NUM_CHILD_IOV);
954 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
955 		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
956 	}
957 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
958 
959 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
960 					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
961 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
962 		ut_expected_io_set_iov(expected_io, i,
963 				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
964 	}
965 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
966 
967 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
968 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
969 	CU_ASSERT(rc == 0);
970 	CU_ASSERT(g_io_done == false);
971 
972 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
973 	stub_complete_io(1);
974 	CU_ASSERT(g_io_done == false);
975 
976 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
977 	stub_complete_io(1);
978 	CU_ASSERT(g_io_done == true);
979 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
980 
981 	/* Test a multi-vector command that needs to be split on the optimal I/O boundary
982 	 * and then split further due to the capacity of child iovs, but fails to split.
983 	 * The split fails because the length of an iovec is not a multiple of the block size.
984 	 */
985 	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
986 		iov[i].iov_base = (void *)((i + 1) * 0x10000);
987 		iov[i].iov_len = 512;
988 	}
989 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
990 	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
991 
992 	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
993 	g_io_done = false;
994 	g_io_status = 0;
995 
996 	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
997 				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
998 	CU_ASSERT(rc == 0);
999 	CU_ASSERT(g_io_done == true);
1000 	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1001 
1002 	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
1003 	 * split, so test that.
1004 	 */
1005 	bdev->optimal_io_boundary = 15;
1006 	g_io_done = false;
1007 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1008 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1009 
1010 	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1011 	CU_ASSERT(rc == 0);
1012 	CU_ASSERT(g_io_done == false);
1013 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1014 	stub_complete_io(1);
1015 	CU_ASSERT(g_io_done == true);
1016 
1017 	/* Test an UNMAP.  This should also not be split. */
1018 	bdev->optimal_io_boundary = 16;
1019 	g_io_done = false;
1020 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1021 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1022 
1023 	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1024 	CU_ASSERT(rc == 0);
1025 	CU_ASSERT(g_io_done == false);
1026 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1027 	stub_complete_io(1);
1028 	CU_ASSERT(g_io_done == true);
1029 
1030 	/* Test a FLUSH.  This should also not be split. */
1031 	bdev->optimal_io_boundary = 16;
1032 	g_io_done = false;
1033 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1034 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1035 
1036 	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1037 	CU_ASSERT(rc == 0);
1038 	CU_ASSERT(g_io_done == false);
1039 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1040 	stub_complete_io(1);
1041 	CU_ASSERT(g_io_done == true);
1042 
1043 	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1044 
1045 	spdk_put_io_channel(io_ch);
1046 	spdk_bdev_close(desc);
1047 	free_bdev(bdev);
1048 	spdk_bdev_finish(bdev_fini_cb, NULL);
1049 }
1050 
1051 static void
1052 bdev_io_split_with_io_wait(void)
1053 {
1054 	struct spdk_bdev *bdev;
1055 	struct spdk_bdev_desc *desc;
1056 	struct spdk_io_channel *io_ch;
1057 	struct spdk_bdev_channel *channel;
1058 	struct spdk_bdev_mgmt_channel *mgmt_ch;
1059 	struct spdk_bdev_opts bdev_opts = {
1060 		.bdev_io_pool_size = 2,
1061 		.bdev_io_cache_size = 1,
1062 	};
1063 	struct iovec iov[3];
1064 	struct ut_expected_io *expected_io;
1065 	int rc;
1066 
1067 	rc = spdk_bdev_set_opts(&bdev_opts);
1068 	CU_ASSERT(rc == 0);
1069 	spdk_bdev_initialize(bdev_init_cb, NULL);
1070 
1071 	bdev = allocate_bdev("bdev0");
1072 
1073 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
1074 	CU_ASSERT(rc == 0);
1075 	CU_ASSERT(desc != NULL);
1076 	io_ch = spdk_bdev_get_io_channel(desc);
1077 	CU_ASSERT(io_ch != NULL);
1078 	channel = spdk_io_channel_get_ctx(io_ch);
1079 	mgmt_ch = channel->shared_resource->mgmt_ch;
1080 
1081 	bdev->optimal_io_boundary = 16;
1082 	bdev->split_on_optimal_io_boundary = true;
1083 
1084 	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1085 	CU_ASSERT(rc == 0);
1086 
1087 	/* Now test that a single-vector command is split correctly.
1088 	 * Offset 14, length 8, payload 0xF000
1089 	 *  Child - Offset 14, length 2, payload 0xF000
1090 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
1091 	 *
1092 	 * Set up the expected values before calling spdk_bdev_read_blocks
1093 	 */
1094 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
1095 	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
1096 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1097 
1098 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
1099 	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
1100 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1101 
1102 	/* The following children will be submitted sequentially due to the capacity of
1103 	 * spdk_bdev_io.
1104 	 */
1105 
1106 	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
1107 	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
1108 	CU_ASSERT(rc == 0);
1109 	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
1110 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1111 
1112 	/* Completing the first read I/O will submit the first child */
1113 	stub_complete_io(1);
1114 	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
1115 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1116 
1117 	/* Completing the first child will submit the second child */
1118 	stub_complete_io(1);
1119 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1120 
1121 	/* Complete the second child I/O.  This should result in our callback getting
1122 	 * invoked since the parent I/O is now complete.
1123 	 */
1124 	stub_complete_io(1);
1125 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1126 
1127 	/* Now set up a more complex, multi-vector command that needs to be split,
1128 	 *  including splitting iovecs.
1129 	 */
1130 	iov[0].iov_base = (void *)0x10000;
1131 	iov[0].iov_len = 512;
1132 	iov[1].iov_base = (void *)0x20000;
1133 	iov[1].iov_len = 20 * 512;
1134 	iov[2].iov_base = (void *)0x30000;
1135 	iov[2].iov_len = 11 * 512;
1136 
1137 	g_io_done = false;
1138 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
1139 	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
1140 	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
1141 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1142 
1143 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
1144 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
1145 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1146 
1147 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
1148 	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
1149 	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
1150 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1151 
1152 	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
1153 	CU_ASSERT(rc == 0);
1154 	CU_ASSERT(g_io_done == false);
1155 
1156 	/* The following children will be submitted sequentially due to the capacity of
1157 	 * spdk_bdev_io.
1158 	 */
1159 
1160 	/* Completing the first child will submit the second child */
1161 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1162 	stub_complete_io(1);
1163 	CU_ASSERT(g_io_done == false);
1164 
1165 	/* Completing the second child will submit the third child */
1166 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1167 	stub_complete_io(1);
1168 	CU_ASSERT(g_io_done == false);
1169 
1170 	/* Completing the third child will result in our callback getting invoked
1171 	 * since the parent I/O is now complete.
1172 	 */
1173 	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1174 	stub_complete_io(1);
1175 	CU_ASSERT(g_io_done == true);
1176 
1177 	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1178 
1179 	spdk_put_io_channel(io_ch);
1180 	spdk_bdev_close(desc);
1181 	free_bdev(bdev);
1182 	spdk_bdev_finish(bdev_fini_cb, NULL);
1183 }
1184 
1185 static void
1186 bdev_io_alignment(void)
1187 {
1188 	struct spdk_bdev *bdev;
1189 	struct spdk_bdev_desc *desc;
1190 	struct spdk_io_channel *io_ch;
1191 	struct spdk_bdev_opts bdev_opts = {
1192 		.bdev_io_pool_size = 20,
1193 		.bdev_io_cache_size = 2,
1194 	};
1195 	int rc;
1196 	void *buf;
1197 	struct iovec iovs[2];
1198 	int iovcnt;
1199 	uint64_t alignment;
1200 
1201 	rc = spdk_bdev_set_opts(&bdev_opts);
1202 	CU_ASSERT(rc == 0);
1203 	spdk_bdev_initialize(bdev_init_cb, NULL);
1204 
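	/* Switch the stub's submit handler to the variant that goes through
	 * spdk_bdev_io_get_buf(), so required_alignment and bounce-buffer
	 * handling are exercised for each request in this test.
	 */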
1205 	fn_table.submit_request = stub_submit_request_aligned_buffer;
1206 	bdev = allocate_bdev("bdev0");
1207 
1208 	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
1209 	CU_ASSERT(rc == 0);
1210 	CU_ASSERT(desc != NULL);
1211 	io_ch = spdk_bdev_get_io_channel(desc);
1212 	CU_ASSERT(io_ch != NULL);
1213 
1214 	/* Create aligned buffer */
1215 	rc = posix_memalign(&buf, 4096, 8192);
1216 	SPDK_CU_ASSERT_FATAL(rc == 0);
1217 
1218 	/* Pass aligned single buffer with no alignment required */
1219 	alignment = 1;
1220 	bdev->required_alignment = spdk_u32log2(alignment);
1221 
1222 	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
1223 	CU_ASSERT(rc == 0);
1224 	stub_complete_io(1);
1225 	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1226 				    alignment));
1227 
1228 	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
1229 	CU_ASSERT(rc == 0);
1230 	stub_complete_io(1);
1231 	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1232 				    alignment));
1233 
1234 	/* Pass unaligned single buffer with no alignment required */
1235 	alignment = 1;
1236 	bdev->required_alignment = spdk_u32log2(alignment);
1237 
1238 	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
1239 	CU_ASSERT(rc == 0);
1240 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1241 	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
1242 	stub_complete_io(1);
1243 
1244 	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
1245 	CU_ASSERT(rc == 0);
1246 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1247 	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
1248 	stub_complete_io(1);
1249 
1250 	/* Pass unaligned single buffer with 512 alignment required */
1251 	alignment = 512;
1252 	bdev->required_alignment = spdk_u32log2(alignment);
1253 
1254 	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
1255 	CU_ASSERT(rc == 0);
1256 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
1257 	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
1258 	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1259 				    alignment));
1260 	stub_complete_io(1);
1261 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1262 
1263 	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
1264 	CU_ASSERT(rc == 0);
1265 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
1266 	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
1267 	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1268 				    alignment));
1269 	stub_complete_io(1);
1270 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1271 
1272 	/* Pass unaligned single buffer with 4096 alignment required */
1273 	alignment = 4096;
1274 	bdev->required_alignment = spdk_u32log2(alignment);
1275 
1276 	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
1277 	CU_ASSERT(rc == 0);
1278 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
1279 	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
1280 	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1281 				    alignment));
1282 	stub_complete_io(1);
1283 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1284 
1285 	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
1286 	CU_ASSERT(rc == 0);
1287 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
1288 	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
1289 	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1290 				    alignment));
1291 	stub_complete_io(1);
1292 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1293 
1294 	/* Pass aligned iovs with no alignment required */
1295 	alignment = 1;
1296 	bdev->required_alignment = spdk_u32log2(alignment);
1297 
1298 	iovcnt = 1;
1299 	iovs[0].iov_base = buf;
1300 	iovs[0].iov_len = 512;
1301 
1302 	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1303 	CU_ASSERT(rc == 0);
1304 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1305 	stub_complete_io(1);
1306 	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
1307 
1308 	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1309 	CU_ASSERT(rc == 0);
1310 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1311 	stub_complete_io(1);
1312 	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
1313 
1314 	/* Pass unaligned iovs with no alignment required */
1315 	alignment = 1;
1316 	bdev->required_alignment = spdk_u32log2(alignment);
1317 
1318 	iovcnt = 2;
1319 	iovs[0].iov_base = buf + 16;
1320 	iovs[0].iov_len = 256;
1321 	iovs[1].iov_base = buf + 16 + 256 + 32;
1322 	iovs[1].iov_len = 256;
1323 
1324 	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1325 	CU_ASSERT(rc == 0);
1326 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1327 	stub_complete_io(1);
1328 	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
1329 
1330 	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1331 	CU_ASSERT(rc == 0);
1332 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1333 	stub_complete_io(1);
1334 	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
1335 
1336 	/* Pass unaligned iov with 2048 alignment required */
1337 	alignment = 2048;
1338 	bdev->required_alignment = spdk_u32log2(alignment);
1339 
1340 	iovcnt = 2;
1341 	iovs[0].iov_base = buf + 16;
1342 	iovs[0].iov_len = 256;
1343 	iovs[1].iov_base = buf + 16 + 256 + 32;
1344 	iovs[1].iov_len = 256;
1345 
1346 	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1347 	CU_ASSERT(rc == 0);
1348 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
1349 	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
1350 	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1351 				    alignment));
1352 	stub_complete_io(1);
1353 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1354 
1355 	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1356 	CU_ASSERT(rc == 0);
1357 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
1358 	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
1359 	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1360 				    alignment));
1361 	stub_complete_io(1);
1362 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1363 
1364 	/* Pass an iov without an allocated buffer and with no alignment required */
1365 	alignment = 1;
1366 	bdev->required_alignment = spdk_u32log2(alignment);
1367 
1368 	iovcnt = 1;
1369 	iovs[0].iov_base = NULL;
1370 	iovs[0].iov_len = 0;
1371 
1372 	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1373 	CU_ASSERT(rc == 0);
1374 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1375 	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1376 				    alignment));
1377 	stub_complete_io(1);
1378 
1379 	/* Pass an iov without an allocated buffer and with 1024-byte alignment required */
1380 	alignment = 1024;
1381 	bdev->required_alignment = spdk_u32log2(alignment);
1382 
1383 	iovcnt = 1;
1384 	iovs[0].iov_base = NULL;
1385 	iovs[0].iov_len = 0;
1386 
1387 	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1388 	CU_ASSERT(rc == 0);
1389 	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1390 	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1391 				    alignment));
1392 	stub_complete_io(1);
1393 
1394 	spdk_put_io_channel(io_ch);
1395 	spdk_bdev_close(desc);
1396 	free_bdev(bdev);
1397 	spdk_bdev_finish(bdev_fini_cb, NULL);
1398 
1399 	free(buf);
1400 }
1401 
1402 int
1403 main(int argc, char **argv)
1404 {
1405 	CU_pSuite	suite = NULL;
1406 	unsigned int	num_failures;
1407 
1408 	if (CU_initialize_registry() != CUE_SUCCESS) {
1409 		return CU_get_error();
1410 	}
1411 
1412 	suite = CU_add_suite("bdev", null_init, null_clean);
1413 	if (suite == NULL) {
1414 		CU_cleanup_registry();
1415 		return CU_get_error();
1416 	}
1417 
1418 	if (
1419 		CU_add_test(suite, "bytes_to_blocks_test", bytes_to_blocks_test) == NULL ||
1420 		CU_add_test(suite, "num_blocks_test", num_blocks_test) == NULL ||
1421 		CU_add_test(suite, "io_valid", io_valid_test) == NULL ||
1422 		CU_add_test(suite, "open_write", open_write_test) == NULL ||
1423 		CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
1424 		CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
1425 		CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
1426 		CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
1427 		CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
1428 		CU_add_test(suite, "bdev_io_split_with_io_wait", bdev_io_split_with_io_wait) == NULL ||
1429 		CU_add_test(suite, "bdev_io_alignment", bdev_io_alignment) == NULL
1430 	) {
1431 		CU_cleanup_registry();
1432 		return CU_get_error();
1433 	}
1434 
1435 	spdk_allocate_thread(_bdev_send_msg, NULL, NULL, NULL, "thread0");
1436 	CU_basic_set_mode(CU_BRM_VERBOSE);
1437 	CU_basic_run_tests();
1438 	num_failures = CU_get_number_of_failures();
1439 	CU_cleanup_registry();
1440 	spdk_free_thread();
1441 	return num_failures;
1442 }
1443