/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/copy_engine.h"
#include "spdk/env.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/event.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "CUnit/Basic.h"

#define BUFFER_IOVS		1024
#define BUFFER_SIZE		(260 * 1024)
#define BDEV_TASK_ARRAY_SIZE	2048

pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

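/*
 * bdevio runs on three SPDK threads: the init thread (app startup, target
 * construction, and shutdown), the ut thread (CUnit test execution), and
 * the io thread (all bdev I/O submissions).
 */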
static struct spdk_thread *g_thread_init;
static struct spdk_thread *g_thread_ut;
static struct spdk_thread *g_thread_io;
static bool g_wait_for_tests = false;
static int g_num_failures = 0;

struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;
	struct io_target	*next;
};

struct bdevio_request {
	char *buf;
	int data_len;
	uint64_t offset;
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;
	struct io_target *target;
};

struct io_target *g_io_targets = NULL;
struct io_target *g_current_io_target = NULL;
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);

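/*
 * Run fn(arg) on the io thread and block the calling (ut) thread until the
 * completion path signals g_test_cond via wake_ut_thread().
 */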
static void
execute_spdk_function(spdk_msg_fn fn, void *arg)
{
	pthread_mutex_lock(&g_test_mutex);
	spdk_thread_send_msg(g_thread_io, fn, arg);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
__get_io_channel(void *arg)
{
	struct io_target *target = arg;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}

static int
bdevio_construct_target(struct spdk_bdev *bdev)
{
	struct io_target *target;
	int rc;
	uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	target = malloc(sizeof(struct io_target));
	if (target == NULL) {
		return -ENOMEM;
	}

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &target->bdev_desc);
	if (rc != 0) {
		free(target);
		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
		return rc;
	}

	printf("  %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
	       spdk_bdev_get_name(bdev),
	       num_blocks, block_size,
	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

	target->bdev = bdev;
	target->next = g_io_targets;
	execute_spdk_function(__get_io_channel, target);
	g_io_targets = target;

	return 0;
}

static int
bdevio_construct_targets(void)
{
	struct spdk_bdev *bdev;
	int rc;

	printf("I/O targets:\n");

	bdev = spdk_bdev_first_leaf();
	while (bdev != NULL) {
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
			return rc;
		}
		bdev = spdk_bdev_next_leaf(bdev);
	}

	if (g_io_targets == NULL) {
		SPDK_ERRLOG("No bdevs to perform tests on\n");
		return -1;
	}

	return 0;
}

static void
__put_io_channel(void *arg)
{
	struct io_target *target = arg;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}

static void
bdevio_cleanup_targets(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		execute_spdk_function(__put_io_channel, target);
		spdk_bdev_close(target->bdev_desc);
		g_io_targets = target->next;
		free(target);
		target = g_io_targets;
	}
}

static bool g_completion_success;

static void
initialize_buffer(char **buf, int pattern, int size)
{
	*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	assert(*buf != NULL);
	memset(*buf, pattern, size);
}

static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_write_zeroes(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

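/*
 * Split req->buf into iovecs of iov_len bytes each for the writev/readv
 * paths. An iov_len of 0 leaves iovcnt at 0, which selects the
 * single-buffer read/write paths instead.
 */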
static void
sgl_chop_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->buf;

	req->iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->iov[req->iovcnt].iov_base = buf;
		req->iov[req->iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
blockdev_write(struct io_target *target, char *tx_buf,
	       uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_write, &req);
}

static void
blockdev_write_zeroes(struct io_target *target, char *tx_buf,
		      uint64_t offset, int data_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;

	g_completion_success = false;

	execute_spdk_function(__blockdev_write_zeroes, &req);
}

static void
__blockdev_read(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_read(struct io_target *target, char *rx_buf,
	      uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = rx_buf;
	req.data_len = data_len;
	req.offset = offset;
	req.iovcnt = 0;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_read, &req);
}

static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int rc;
	rc = memcmp(rx_buf, tx_buf, data_length);

	spdk_free(rx_buf);
	spdk_free(tx_buf);

	return rc;
}

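/*
 * Core write/read/verify helper used by most tests below: write a pattern
 * (or write-zeroes) at the given offset, read the range back, and compare.
 * When write_zeroes is set, the tx buffer is zeroed and the rx buffer is
 * pre-filled with the pattern, so a successful compare proves the read
 * actually returned zeroes.
 */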
static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes)
{
	struct io_target *target;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;

	target = g_current_io_target;

	if (data_length < spdk_bdev_get_block_size(target->bdev) ||
	    data_length / spdk_bdev_get_block_size(target->bdev) > spdk_bdev_get_num_blocks(target->bdev)) {
		return;
	}

	if (!write_zeroes) {
		initialize_buffer(&tx_buf, pattern, data_length);
		initialize_buffer(&rx_buf, 0, data_length);

		blockdev_write(target, tx_buf, offset, data_length, iov_len);
	} else {
		initialize_buffer(&tx_buf, 0, data_length);
		initialize_buffer(&rx_buf, pattern, data_length);

		blockdev_write_zeroes(target, tx_buf, offset, data_length);
	}

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}
	blockdev_read(target, rx_buf, offset, data_length, iov_len);

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	if (g_completion_success) {
		/* Verify the write by comparing it with the data read back
		 * from the same offset on the blockdev; the compare helper
		 * also frees both buffers. */
		rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
		CU_ASSERT_EQUAL(rc, 0);
	} else {
		/* The compare helper did not run, so free the buffers here. */
		spdk_free(rx_buf);
		spdk_free(tx_buf);
	}
}

static void
blockdev_write_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_zeroes_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will not have to split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_1m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 1M */
	data_length = 1048576;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_3m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3M */
	data_length = 3145728;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev. It also
 * tests a write size that is not an even multiple of
 * the bdev layer zero buffer size.
 */
static void
blockdev_write_zeroes_read_3m_500k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3.5M */
	data_length = 3670016;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

static void
blockdev_writev_readv_4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_30x4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 120K (30 x 4K) */
	data_length = 4096 * 30;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_512Bytes(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_512Bytes(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	iov_len = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_size_gt_128k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k_two_iov(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 128 * 1024;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_invalid_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size is not a multiple of the block size */
	data_length = 0x1015;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	uint64_t offset;
	uint32_t block_size;
	int rc;

	target = g_current_io_target;
	bdev = target->bdev;

	block_size = spdk_bdev_get_block_size(bdev);

	/* Set the start offset to the last block, so that
	 * offset + nbytes == total size of the blockdev. */
	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);

	initialize_buffer(&tx_buf, 0xA3, block_size);
	initialize_buffer(&rx_buf, 0, block_size);

	blockdev_write(target, tx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	blockdev_read(target, rx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	/* Assert the write by comparing it with values read
	 * from each blockdev */
	rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
	CU_ASSERT_EQUAL(rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	data_length;
	uint64_t offset;
	int pattern;

	/* Tests the overflow condition of the blockdevs. */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	pattern = 0xA3;

	target = g_current_io_target;
	bdev = target->bdev;

	/* The start offset is valid, but offset + nbytes is greater
	 * than the total size of the blockdev, so both I/Os should fail. */
	offset = ((spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev)) - 1024);

	initialize_buffer(&tx_buf, pattern, data_length);
	initialize_buffer(&rx_buf, 0, data_length);

	blockdev_write(target, tx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	/* Both I/Os are expected to fail, so the compare helper never runs;
	 * free the buffers here. */
	spdk_free(rx_buf);
	spdk_free(tx_buf);
}

static void
blockdev_write_read_max_offset(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	/* The start offset has been set to UINT64_MAX such that
	 * adding nbytes wraps around and points to an invalid address. */
	offset = UINT64_MAX;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_overlapped_write_read_8k(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 8K */
	data_length = 8192;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);

	/* Overwrite 8K with the pattern 0xBB at an offset that overlaps the
	 * range written above, then verify the new value in the overlapped
	 * address range. */
	pattern = 0xBB;
	/* Offset = 4096, so the second write overlaps the 4K-8K portion
	 * of the first write. */
	offset = 4096;
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
__blockdev_reset(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_test_reset(void)
{
	struct bdevio_request req;
	struct io_target *target;

	target = g_current_io_target;
	req.target = target;

	g_completion_success = false;

	execute_spdk_function(__blockdev_reset, &req);

	/* Workaround: NVMe-oF target doesn't support reset yet - so for now
	 *  don't fail the test if it's an NVMe bdev.
	 */
	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	}
}

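/*
 * Carries a raw NVMe command through the passthrough paths; sct/sc receive
 * the completion's status code type and status code so tests can assert on
 * the exact NVMe status.
 */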
struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;
	void *buf;
	uint32_t len;
	struct io_target *target;
	int sct;
	int sc;
};

static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_nvme_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
					&pt_req->cmd, pt_req->buf, pt_req->len,
					nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_test_nvme_passthru_rw(void)
{
	struct bdevio_passthrough_request pt_req;
	void *write_buf, *read_buf;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
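	/* Raw one-block write at LBA 4: cdw10/11 hold the starting LBA and
	 * cdw12 holds the 0-based block count (0 means one block). */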
	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
	pt_req.cmd.nsid = 1;
	*(uint64_t *)&pt_req.cmd.cdw10 = 4;
	pt_req.cmd.cdw12 = 0;

	pt_req.len = spdk_bdev_get_block_size(target->bdev);
	write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	memset(write_buf, 0xA5, pt_req.len);
	pt_req.buf = write_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
	read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	pt_req.buf = read_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
	spdk_free(read_buf);
	spdk_free(write_buf);
}

static void
blockdev_test_nvme_passthru_vendor_specific(void)
{
	struct bdevio_passthrough_request pt_req;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
	pt_req.cmd.nsid = 1;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

static void
__blockdev_nvme_admin_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
					   &pt_req->cmd, pt_req->buf, pt_req->len,
					   nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_test_nvme_admin_passthru(void)
{
	struct io_target *target;
	struct bdevio_passthrough_request pt_req;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	pt_req.cmd.nsid = 0;
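	/* CNS 0x01: identify controller data structure */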
	*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;

	pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
	pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);

	pt_req.sct = SPDK_NVME_SCT_GENERIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	/* Release the DMA-able identify buffer. */
	spdk_free(pt_req.buf);
}

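/*
 * Test run teardown, executed on the init thread once the ut thread has
 * finished: release all targets, then either answer the pending RPC (when
 * started with -w) or stop the app with the failure count as the exit code.
 */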
static void
__stop_init_thread(void *arg)
{
	unsigned num_failures = g_num_failures;
	struct spdk_jsonrpc_request *request = arg;

	g_num_failures = 0;

	bdevio_cleanup_targets();
	if (g_wait_for_tests) {
		/* Do not stop the app yet, wait for another RPC */
		rpc_perform_tests_cb(num_failures, request);
		return;
	}
	spdk_app_stop(num_failures);
}

static void
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	g_num_failures = num_failures;

	spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
}

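/*
 * CUnit runs one suite per I/O target. suite_init and suite_fini advance
 * g_current_io_target through the target list so that each suite's tests
 * operate on their own bdev.
 */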
static int
suite_init(void)
{
	if (g_current_io_target == NULL) {
		g_current_io_target = g_io_targets;
	}
	return 0;
}

static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}

#define SUITE_NAME_MAX 64

static int
__setup_ut_on_single_target(struct io_target *target)
{
	unsigned rc = 0;
	CU_pSuite suite = NULL;
	char name[SUITE_NAME_MAX];

	snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
	suite = CU_add_suite(name, suite_init, suite_fini);
	if (suite == NULL) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}

	if (
		CU_add_test(suite, "blockdev write read 4k", blockdev_write_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3.5m", blockdev_write_zeroes_read_3m_500k) == NULL
		|| CU_add_test(suite, "blockdev reset",
			       blockdev_test_reset) == NULL
		|| CU_add_test(suite, "blockdev write read 512 bytes",
			       blockdev_write_read_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev write read size > 128k",
			       blockdev_write_read_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev write read invalid size",
			       blockdev_write_read_invalid_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read max offset",
			       blockdev_write_read_max_offset) == NULL
		|| CU_add_test(suite, "blockdev write read 8k on overlapped address offset",
			       blockdev_overlapped_write_read_8k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 4k", blockdev_writev_readv_4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 30 x 4k",
			       blockdev_writev_readv_30x4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 512 bytes",
			       blockdev_writev_readv_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k",
			       blockdev_writev_readv_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru rw",
			       blockdev_test_nvme_passthru_rw) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
			       blockdev_test_nvme_passthru_vendor_specific) == NULL
		|| CU_add_test(suite, "blockdev nvme admin passthru",
			       blockdev_test_nvme_admin_passthru) == NULL
	) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}
	return 0;
}

static void
__run_ut_thread(void *arg)
{
	struct spdk_jsonrpc_request *request = arg;
	int rc = 0;
	struct io_target *target;
	unsigned num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		/* CUnit error, probably won't recover */
		rc = CU_get_error();
		stop_init_thread(-rc, request);
		return;
	}

	target = g_io_targets;
	while (target != NULL) {
		rc = __setup_ut_on_single_target(target);
		if (rc < 0) {
			/* CUnit error, probably won't recover */
			stop_init_thread(-rc, request);
			return;
		}
		target = target->next;
	}
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	stop_init_thread(num_failures, request);
}

static void
__construct_targets(void *arg)
{
	if (bdevio_construct_targets() < 0) {
		spdk_app_stop(-1);
		return;
	}

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
}

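/*
 * App entry point, running on the init thread. Requires at least three
 * cores: the init thread stays on the current core, and dedicated
 * "ut_thread" and "io_thread" SPDK threads are created, each pinned to its
 * own core. With -w, target construction waits for the perform_tests RPC.
 */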
static void
test_main(void *arg1)
{
	struct spdk_cpuset *tmpmask, *appmask;
	uint32_t cpu, init_cpu;

	pthread_mutex_init(&g_test_mutex, NULL);
	pthread_cond_init(&g_test_cond, NULL);

	tmpmask = spdk_cpuset_alloc();
	if (tmpmask == NULL) {
		spdk_app_stop(-1);
		return;
	}

	appmask = spdk_app_get_core_mask();

	if (spdk_cpuset_count(appmask) < 3) {
		spdk_cpuset_free(tmpmask);
		spdk_app_stop(-1);
		return;
	}

	init_cpu = spdk_env_get_current_core();
	g_thread_init = spdk_get_thread();

	for (cpu = 0; cpu < SPDK_ENV_LCORE_ID_ANY; cpu++) {
		if (cpu != init_cpu && spdk_cpuset_get_cpu(appmask, cpu)) {
			spdk_cpuset_zero(tmpmask);
			spdk_cpuset_set_cpu(tmpmask, cpu, true);
			g_thread_ut = spdk_thread_create("ut_thread", tmpmask);
			break;
		}
	}

	if (cpu == SPDK_ENV_LCORE_ID_ANY) {
		spdk_cpuset_free(tmpmask);
		spdk_app_stop(-1);
		return;
	}

	for (cpu++; cpu < SPDK_ENV_LCORE_ID_ANY; cpu++) {
		if (cpu != init_cpu && spdk_cpuset_get_cpu(appmask, cpu)) {
			spdk_cpuset_zero(tmpmask);
			spdk_cpuset_set_cpu(tmpmask, cpu, true);
			g_thread_io = spdk_thread_create("io_thread", tmpmask);
			break;
		}
	}

	if (cpu == SPDK_ENV_LCORE_ID_ANY) {
		spdk_cpuset_free(tmpmask);
		spdk_app_stop(-1);
		return;
	}

	spdk_cpuset_free(tmpmask);

	if (g_wait_for_tests) {
		/* Do not perform any tests until RPC is received */
		return;
	}

	spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
}

static void
bdevio_usage(void)
{
	printf(" -w                        start bdevio app and wait for RPC to start the tests\n");
}

static int
bdevio_parse_arg(int ch, char *arg)
{
	switch (ch) {
	case 'w':
		g_wait_for_tests = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

struct rpc_perform_tests {
	char *name;
};

static void
free_rpc_perform_tests(struct rpc_perform_tests *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};

static void
rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	struct spdk_json_write_ctx *w;

	if (num_failures == 0) {
		w = spdk_jsonrpc_begin_result(request);
		spdk_json_write_uint32(w, num_failures);
		spdk_jsonrpc_end_result(request, w);
	} else {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						     "%d test cases failed", num_failures);
	}
}

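/*
 * "perform_tests" RPC handler: construct an I/O target for the named bdev
 * (or for every leaf bdev when no name is given), then kick off the CUnit
 * run on the ut thread. The request is answered from rpc_perform_tests_cb
 * once the run completes.
 */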
static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	struct spdk_bdev *bdev;
	int rc;

	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		bdev = spdk_bdev_get_by_name(req.name);
		if (bdev == NULL) {
			SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Bdev '%s' does not exist: %s",
							     req.name, spdk_strerror(ENODEV));
			goto invalid;
		}
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     spdk_bdev_get_name(bdev), spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);

	return;

invalid:
	free_rpc_perform_tests(&req);
}
SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)

int
main(int argc, char **argv)
{
	int			rc;
	struct spdk_app_opts	opts = {};

	spdk_app_opts_init(&opts);
	opts.name = "bdevio";
	opts.reactor_mask = "0x7";

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
				      bdevio_parse_arg, bdevio_usage)) !=
	    SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	rc = spdk_app_start(&opts, test_main, NULL);
	spdk_app_fini();

	return rc;
}