/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/copy_engine.h"
#include "spdk/env.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/event.h"

#include "CUnit/Basic.h"

#define BUFFER_IOVS		1024
#define BUFFER_SIZE		(260 * 1024)
#define BDEV_TASK_ARRAY_SIZE	2048

pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

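/*
 * The app runs on three reactors: the init lcore starts and stops the app,
 * the ut lcore runs the CUnit tests, and the io lcore performs all bdev I/O.
 */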
static uint32_t g_lcore_id_init;
static uint32_t g_lcore_id_ut;
static uint32_t g_lcore_id_io;

struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;
	struct io_target	*next;
};

struct bdevio_request {
	char *buf;
	int data_len;
	uint64_t offset;
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;
	struct io_target *target;
};

static struct io_target *g_io_targets = NULL;

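/*
 * Dispatch fn to the io lcore and block the calling thread until the
 * handler calls wake_ut_thread().  Taking g_test_mutex before firing the
 * event guarantees the completion signal cannot arrive before this thread
 * is waiting on g_test_cond.
 */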
static void
execute_spdk_function(spdk_event_fn fn, void *arg1, void *arg2)
{
	struct spdk_event *event;

	event = spdk_event_allocate(g_lcore_id_io, fn, arg1, arg2);
	pthread_mutex_lock(&g_test_mutex);
	spdk_event_call(event);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
__get_io_channel(void *arg1, void *arg2)
{
	struct io_target *target = arg1;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}

static int
bdevio_construct_target(struct spdk_bdev *bdev)
{
	struct io_target *target;
	int rc;
	uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	target = malloc(sizeof(struct io_target));
	if (target == NULL) {
		return -ENOMEM;
	}

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &target->bdev_desc);
	if (rc != 0) {
		free(target);
		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
		return rc;
	}

	printf("  %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
	       spdk_bdev_get_name(bdev),
	       num_blocks, block_size,
	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

	target->bdev = bdev;
	target->next = g_io_targets;
	execute_spdk_function(__get_io_channel, target, NULL);
	g_io_targets = target;

	return 0;
}

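/*
 * Build an io_target for every "leaf" bdev, i.e. a bdev that no other
 * virtual bdev has been stacked on top of.
 */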
static int
bdevio_construct_targets(void)
{
	struct spdk_bdev *bdev;
	int rc;

	printf("I/O targets:\n");

	bdev = spdk_bdev_first_leaf();
	while (bdev != NULL) {
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
			return rc;
		}
		bdev = spdk_bdev_next_leaf(bdev);
	}

	if (g_io_targets == NULL) {
		SPDK_ERRLOG("No bdevs to perform tests on\n");
		return -1;
	}

	return 0;
}

static void
__put_io_channel(void *arg1, void *arg2)
{
	struct io_target *target = arg1;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}

static void
bdevio_cleanup_targets(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		execute_spdk_function(__put_io_channel, target, NULL);
		spdk_bdev_close(target->bdev_desc);
		g_io_targets = target->next;
		free(target);
		target = g_io_targets;
	}
}

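/* Completion status of the most recent I/O, set by quick_test_complete(). */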
static bool g_completion_success;

static void
initialize_buffer(char **buf, int pattern, int size)
{
	*buf = spdk_dma_zmalloc(size, 0x1000, NULL);
	assert(*buf != NULL);
	memset(*buf, pattern, size);
}

static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_write(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_write_zeroes(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

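/*
 * Split req->buf into iovecs of at most iov_len bytes each.  With
 * iov_len == 0, req->iovcnt stays 0 and the callers fall back to the
 * non-vectored read/write path.
 */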
static void
sgl_chop_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->buf;

	req->iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->iov[req->iovcnt].iov_base = buf;
		req->iov[req->iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

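/*
 * Issue a write (vectored when iov_len > 0) on the io lcore and block
 * until it completes; the result is left in g_completion_success.
 */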
static void
blockdev_write(struct io_target *target, char *tx_buf,
	       uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_write, &req, NULL);
}

static void
blockdev_write_zeroes(struct io_target *target, char *tx_buf,
		      uint64_t offset, int data_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;

	g_completion_success = false;

	execute_spdk_function(__blockdev_write_zeroes, &req, NULL);
}

static void
__blockdev_read(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_read(struct io_target *target, char *rx_buf,
	      uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = rx_buf;
	req.data_len = data_len;
	req.offset = offset;
	req.iovcnt = 0;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_read, &req, NULL);
}

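/* Compare the two buffers, then free both DMA allocations. */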
static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int rc;

	rc = memcmp(rx_buf, tx_buf, data_length);

	spdk_dma_free(rx_buf);
	spdk_dma_free(tx_buf);

	return rc;
}

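/*
 * Core write-then-read check: for every target large enough for the
 * request, write `pattern` (or zeroes), read the range back, and verify
 * the data whenever the I/O is expected to succeed.
 */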
static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes)
{
	struct io_target *target;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;

	target = g_io_targets;
	while (target != NULL) {
		if (data_length < spdk_bdev_get_block_size(target->bdev) ||
		    data_length / spdk_bdev_get_block_size(target->bdev) > spdk_bdev_get_num_blocks(target->bdev)) {
			target = target->next;
			continue;
		}

		if (!write_zeroes) {
			initialize_buffer(&tx_buf, pattern, data_length);
			initialize_buffer(&rx_buf, 0, data_length);

			blockdev_write(target, tx_buf, offset, data_length, iov_len);
		} else {
			initialize_buffer(&tx_buf, 0, data_length);
			initialize_buffer(&rx_buf, pattern, data_length);

			blockdev_write_zeroes(target, tx_buf, offset, data_length);
		}

		if (expected_rc == 0) {
			CU_ASSERT_EQUAL(g_completion_success, true);
		} else {
			CU_ASSERT_EQUAL(g_completion_success, false);
		}
		blockdev_read(target, rx_buf, offset, data_length, iov_len);

		if (expected_rc == 0) {
			CU_ASSERT_EQUAL(g_completion_success, true);
		} else {
			CU_ASSERT_EQUAL(g_completion_success, false);
		}

		if (g_completion_success) {
			rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
			/* Assert the write by comparing it with values read
			 * from each blockdev */
			CU_ASSERT_EQUAL(rc, 0);
		} else {
			/* The data-match helper never runs on failure, so free
			 * the DMA buffers here to avoid leaking them. */
			spdk_dma_free(rx_buf);
			spdk_dma_free(tx_buf);
		}

		target = target->next;
	}
}

static void
blockdev_write_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_zeroes_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This I/O will not have to split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_1m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 1M */
	data_length = 1048576;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This I/O will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_3m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3M */
	data_length = 3145728;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This I/O will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev. It also
 * tests a write size that is not an even multiple of
 * the bdev layer zero buffer size.
 */
static void
blockdev_write_zeroes_read_3m_500k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3.5M */
	data_length = 3670016;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

static void
blockdev_writev_readv_4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_30x4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 120K (30 x 4K) */
	data_length = 4096 * 30;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_512Bytes(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_512Bytes(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	iov_len = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_size_gt_128k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k_two_iov(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 128 * 1024;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_invalid_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size is not a multiple of the block size */
	data_length = 0x1015;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	uint64_t offset;
	uint32_t block_size;
	int rc;

	target = g_io_targets;
	while (target != NULL) {
		bdev = target->bdev;

		block_size = spdk_bdev_get_block_size(bdev);

		/* Set the start offset to the last block so that
		 * offset + nbytes == total size of the blockdev. */
		offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);

		initialize_buffer(&tx_buf, 0xA3, block_size);
		initialize_buffer(&rx_buf, 0, block_size);

		blockdev_write(target, tx_buf, offset, block_size, 0);
		CU_ASSERT_EQUAL(g_completion_success, true);

		blockdev_read(target, rx_buf, offset, block_size, 0);
		CU_ASSERT_EQUAL(g_completion_success, true);

		rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
		/* Assert the write by comparing it with values read
		 * from each blockdev */
		CU_ASSERT_EQUAL(rc, 0);

		target = target->next;
	}
}

static void
blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	data_length;
	uint64_t offset;
	int pattern;

	/* Tests the overflow condition of the blockdevs. */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	pattern = 0xA3;

	target = g_io_targets;
	while (target != NULL) {
		bdev = target->bdev;

		/* The start offset is valid, but offset + nbytes is
		 * greater than the total size of the blockdev, so the
		 * I/O should fail. */
		offset = ((spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev)) - 1024);

		initialize_buffer(&tx_buf, pattern, data_length);
		initialize_buffer(&rx_buf, 0, data_length);

		blockdev_write(target, tx_buf, offset, data_length, 0);
		CU_ASSERT_EQUAL(g_completion_success, false);

		blockdev_read(target, rx_buf, offset, data_length, 0);
		CU_ASSERT_EQUAL(g_completion_success, false);

		/* These buffers are never handed to the data-match helper,
		 * so free them here to avoid leaking DMA memory. */
		spdk_dma_free(rx_buf);
		spdk_dma_free(tx_buf);

		target = target->next;
	}
}

static void
blockdev_write_read_max_offset(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	/* The start offset has been set to UINT64_MAX such that
	 * adding nbytes wraps around and points to an invalid address. */
	offset = UINT64_MAX;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_overlapped_write_read_8k(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 8K */
	data_length = 8192;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);

	/* Overwrite an overlapping 8K range with the pattern 0xBB and
	 * assert the new values in the overlapped address range */
	pattern = 0xBB;
	/* Offset = 4096 so the new 8K write overlaps the second half
	 * of the range written above */
	offset = 4096;
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
__blockdev_reset(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_reset(struct io_target *target)
{
	struct bdevio_request req;

	req.target = target;

	g_completion_success = false;

	execute_spdk_function(__blockdev_reset, &req, NULL);
}

static void
blockdev_test_reset(void)
{
	struct io_target	*target;

	target = g_io_targets;
	while (target != NULL) {
		blockdev_reset(target);
		/* Workaround: NVMe-oF target doesn't support reset yet - so for now
		 *  don't fail the test if it's an NVMe bdev.
		 */
		if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
			CU_ASSERT_EQUAL(g_completion_success, true);
		}

		target = target->next;
	}
}

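/*
 * Context for the NVMe passthrough tests; sct/sc receive the NVMe status
 * code type and status code reported on completion.
 */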
struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;
	void *buf;
	uint32_t len;
	struct io_target *target;
	int sct;
	int sc;
};

static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_nvme_passthru(void *arg1, void *arg2)
{
	struct bdevio_passthrough_request *pt_req = arg1;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
					&pt_req->cmd, pt_req->buf, pt_req->len,
					nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_nvme_passthru_rw(struct io_target *target)
{
	struct bdevio_passthrough_request pt_req;
	void *write_buf, *read_buf;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
	pt_req.cmd.nsid = 1;
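	/* cdw10/cdw11 hold the 64-bit starting LBA (LBA 4 here); cdw12
	 * NLB = 0 encodes a single logical block. */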
	*(uint64_t *)&pt_req.cmd.cdw10 = 4;
	pt_req.cmd.cdw12 = 0;

	pt_req.len = spdk_bdev_get_block_size(target->bdev);
	write_buf = spdk_dma_malloc(pt_req.len, 0, NULL);
	assert(write_buf != NULL);
	memset(write_buf, 0xA5, pt_req.len);
	pt_req.buf = write_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req, NULL);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
	read_buf = spdk_dma_zmalloc(pt_req.len, 0, NULL);
	assert(read_buf != NULL);
	pt_req.buf = read_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req, NULL);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
	spdk_dma_free(read_buf);
	spdk_dma_free(write_buf);
}

static void
blockdev_test_nvme_passthru_rw(void)
{
	struct io_target	*target;

	target = g_io_targets;
	while (target != NULL) {
		blockdev_nvme_passthru_rw(target);
		target = target->next;
	}
}

static void
blockdev_nvme_passthru_vendor_specific(struct io_target *target)
{
	struct bdevio_passthrough_request pt_req;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
	pt_req.cmd.nsid = 1;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req, NULL);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

static void
blockdev_test_nvme_passthru_vendor_specific(void)
{
	struct io_target	*target;

	target = g_io_targets;
	while (target != NULL) {
		blockdev_nvme_passthru_vendor_specific(target);
		target = target->next;
	}
}

static void
__stop_init_thread(void *arg1, void *arg2)
{
	unsigned num_failures = (unsigned)(uintptr_t)arg1;

	bdevio_cleanup_targets();
	spdk_app_stop(num_failures);
}

static void
stop_init_thread(unsigned num_failures)
{
	struct spdk_event *event;

	event = spdk_event_allocate(g_lcore_id_init, __stop_init_thread,
				    (void *)(uintptr_t)num_failures, NULL);
	spdk_event_call(event);
}

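/*
 * Runs on the ut lcore: register and run the CUnit suite, then hand the
 * failure count back to the init lcore so it can stop the app.
 */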
static void
__run_ut_thread(void *arg1, void *arg2)
{
	CU_pSuite suite = NULL;
	unsigned num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		stop_init_thread(CU_get_error());
		return;
	}

	suite = CU_add_suite("components_suite", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		stop_init_thread(CU_get_error());
		return;
	}

	if (
		CU_add_test(suite, "blockdev write read 4k", blockdev_write_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3.5m", blockdev_write_zeroes_read_3m_500k) == NULL
		|| CU_add_test(suite, "blockdev reset",
			       blockdev_test_reset) == NULL
		|| CU_add_test(suite, "blockdev write read 512 bytes",
			       blockdev_write_read_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev write read size > 128k",
			       blockdev_write_read_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev write read invalid size",
			       blockdev_write_read_invalid_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read max offset",
			       blockdev_write_read_max_offset) == NULL
		|| CU_add_test(suite, "blockdev write read 8k on overlapped address offset",
			       blockdev_overlapped_write_read_8k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 4k", blockdev_writev_readv_4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 30 x 4k",
			       blockdev_writev_readv_30x4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 512 bytes",
			       blockdev_writev_readv_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k",
			       blockdev_writev_readv_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru rw",
			       blockdev_test_nvme_passthru_rw) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
			       blockdev_test_nvme_passthru_vendor_specific) == NULL
	) {
		CU_cleanup_registry();
		stop_init_thread(CU_get_error());
		return;
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	stop_init_thread(num_failures);
}

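/*
 * First code to run once the app framework is up, on the init lcore:
 * reserve the three lcores, open the targets, then kick off the CUnit
 * run on the ut lcore.
 */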
static void
test_main(void *arg1)
{
	struct spdk_event *event;

	pthread_mutex_init(&g_test_mutex, NULL);
	pthread_cond_init(&g_test_cond, NULL);

	g_lcore_id_init = spdk_env_get_first_core();
	g_lcore_id_ut = spdk_env_get_next_core(g_lcore_id_init);
	g_lcore_id_io = spdk_env_get_next_core(g_lcore_id_ut);

	if (g_lcore_id_init == SPDK_ENV_LCORE_ID_ANY ||
	    g_lcore_id_ut == SPDK_ENV_LCORE_ID_ANY ||
	    g_lcore_id_io == SPDK_ENV_LCORE_ID_ANY) {
		SPDK_ERRLOG("Could not reserve 3 separate threads.\n");
		spdk_app_stop(-1);
		return;
	}

	if (bdevio_construct_targets() < 0) {
		spdk_app_stop(-1);
		return;
	}

	event = spdk_event_allocate(g_lcore_id_ut, __run_ut_thread, NULL, NULL);
	spdk_event_call(event);
}

static void
bdevio_usage(void)
{
}

static int
bdevio_parse_arg(int ch, char *arg)
{
	return 0;
}

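/*
 * reactor_mask 0x7 requests three cores so that the init, ut and io
 * lcores above each get a dedicated reactor.
 */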
int
main(int argc, char **argv)
{
	int			rc;
	struct spdk_app_opts	opts = {};

	spdk_app_opts_init(&opts);
	opts.name = "bdevio";
	opts.reactor_mask = "0x7";

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "", NULL,
				      bdevio_parse_arg, bdevio_usage)) !=
	    SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	rc = spdk_app_start(&opts, test_main, NULL);
	spdk_app_fini();

	return rc;
}