/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/copy_engine.h"
#include "spdk/env.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/event.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "CUnit/Basic.h"

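/* Note: BUFFER_SIZE is presumably sized to comfortably exceed the largest
 * single transfer exercised below (the 132K "size > 128k" cases), so those
 * tests can probe bdev-layer splitting without overrunning the buffer. */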
#define BUFFER_IOVS		1024
#define BUFFER_SIZE		(260 * 1024)
#define BDEV_TASK_ARRAY_SIZE	2048

pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

static struct spdk_thread *g_thread_init;
static struct spdk_thread *g_thread_ut;
static struct spdk_thread *g_thread_io;
static bool g_wait_for_tests = false;
static int g_num_failures = 0;

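/* Each leaf bdev under test gets one io_target: an open descriptor plus an
 * I/O channel owned by the dedicated I/O thread. Targets are kept on a
 * singly-linked list headed by g_io_targets. */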
struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;
	struct io_target	*next;
};

struct bdevio_request {
	char *buf;
	int data_len;
	uint64_t offset;
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;
	struct io_target *target;
};

struct io_target *g_io_targets = NULL;
struct io_target *g_current_io_target = NULL;
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);

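/* Thread handshake used throughout this test: the unit-test thread posts a
 * function to the I/O thread and sleeps on g_test_cond; the posted function
 * (or the completion callback it arms) calls wake_ut_thread() to resume it.
 * A typical call site therefore looks like:
 *
 *	g_completion_success = false;
 *	execute_spdk_function(__blockdev_write, &req);
 *	// __blockdev_write submits the I/O; quick_test_complete() records
 *	// the result and wakes us once the bdev layer finishes.
 *
 * This serializes the test logic - only one message is in flight at a time -
 * so a single g_completion_success flag is sufficient. */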
static void
execute_spdk_function(spdk_msg_fn fn, void *arg)
{
	pthread_mutex_lock(&g_test_mutex);
	spdk_thread_send_msg(g_thread_io, fn, arg);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
__get_io_channel(void *arg)
{
	struct io_target *target = arg;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}

static int
bdevio_construct_target(struct spdk_bdev *bdev)
{
	struct io_target *target;
	int rc;
	uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	target = malloc(sizeof(struct io_target));
	if (target == NULL) {
		return -ENOMEM;
	}

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &target->bdev_desc);
	if (rc != 0) {
		free(target);
		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
		return rc;
	}

	printf("  %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
	       spdk_bdev_get_name(bdev),
	       num_blocks, block_size,
	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

	target->bdev = bdev;
	target->next = g_io_targets;
	execute_spdk_function(__get_io_channel, target);
	g_io_targets = target;

	return 0;
}

static int
bdevio_construct_targets(void)
{
	struct spdk_bdev *bdev;
	int rc;

	printf("I/O targets:\n");

	bdev = spdk_bdev_first_leaf();
	while (bdev != NULL) {
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
			return rc;
		}
		bdev = spdk_bdev_next_leaf(bdev);
	}

	if (g_io_targets == NULL) {
		SPDK_ERRLOG("No bdevs to perform tests on\n");
		return -1;
	}

	return 0;
}

static void
__put_io_channel(void *arg)
{
	struct io_target *target = arg;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}

static void
bdevio_cleanup_targets(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		execute_spdk_function(__put_io_channel, target);
		spdk_bdev_close(target->bdev_desc);
		g_io_targets = target->next;
		free(target);
		target = g_io_targets;
	}
}

static bool g_completion_success;

static void
initialize_buffer(char **buf, int pattern, int size)
{
	/* Buffers must be DMA-safe and 4 KiB-aligned so they can be handed
	 * directly to any bdev. */
	*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	assert(*buf != NULL);
	memset(*buf, pattern, size);
}

static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_write_zeroes(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

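/* Split req->buf into req->iov[] entries of at most iov_len bytes each, so
 * the writev/readv paths can exercise scatter-gather I/O. iov_len == 0
 * selects the plain contiguous-buffer path (iovcnt stays 0). For example,
 * the "size > 128k in two iovs" test below chops 135168 bytes with
 * iov_len = 131072 into one 128K iovec plus one 4K iovec. */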
static void
sgl_chop_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->buf;

	req->iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->iov[req->iovcnt].iov_base = buf;
		req->iov[req->iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
blockdev_write(struct io_target *target, char *tx_buf,
	       uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_write, &req);
}

static void
blockdev_write_zeroes(struct io_target *target, char *tx_buf,
		      uint64_t offset, int data_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;

	g_completion_success = false;

	execute_spdk_function(__blockdev_write_zeroes, &req);
}

static void
__blockdev_read(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_read(struct io_target *target, char *rx_buf,
	      uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = rx_buf;
	req.data_len = data_len;
	req.offset = offset;
	req.iovcnt = 0;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_read, &req);
}

static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int rc;
	rc = memcmp(rx_buf, tx_buf, data_length);

	spdk_free(rx_buf);
	spdk_free(tx_buf);

	return rc;
}

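/* Core write/read verifier used by most tests below. For a normal write,
 * tx_buf carries the pattern and rx_buf starts zeroed; for write_zeroes the
 * roles are flipped (rx_buf is pre-filled with the pattern) so that a stale
 * rx buffer cannot spuriously compare equal - the read must really return
 * zeroes for the memcmp to pass. */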
static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes)
{
	struct io_target *target;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;

	target = g_current_io_target;

	/* Skip cases that cannot be expressed on this bdev: the length must
	 * cover at least one block and must not exceed the bdev capacity. */
	if (data_length < spdk_bdev_get_block_size(target->bdev) ||
	    data_length / spdk_bdev_get_block_size(target->bdev) > spdk_bdev_get_num_blocks(target->bdev)) {
		return;
	}

	if (!write_zeroes) {
		initialize_buffer(&tx_buf, pattern, data_length);
		initialize_buffer(&rx_buf, 0, data_length);

		blockdev_write(target, tx_buf, offset, data_length, iov_len);
	} else {
		initialize_buffer(&tx_buf, 0, data_length);
		initialize_buffer(&rx_buf, pattern, data_length);

		blockdev_write_zeroes(target, tx_buf, offset, data_length);
	}

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}
	blockdev_read(target, rx_buf, offset, data_length, iov_len);

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	if (g_completion_success) {
		rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
		/* Assert the write by comparing it with values read
		 * from each blockdev */
		CU_ASSERT_EQUAL(rc, 0);
	} else {
		/* blockdev_write_read_data_match() frees the buffers on the
		 * success path; free them here on the failure path. */
		spdk_free(rx_buf);
		spdk_free(tx_buf);
	}
}

static void
blockdev_write_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_zeroes_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This I/O will not have to split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_1m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 1M */
	data_length = 1048576;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This I/O will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_3m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3M */
	data_length = 3145728;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This I/O will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev. It also
 * tests a write size that is not an even multiple of
 * the bdev layer zero buffer size.
 */
static void
blockdev_write_zeroes_read_3m_500k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3.5M */
	data_length = 3670016;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

static void
blockdev_writev_readv_4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_30x4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 120K (30 x 4K) */
	data_length = 4096 * 30;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_512Bytes(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_512Bytes(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	iov_len = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_size_gt_128k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k_two_iov(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 128 * 1024;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_invalid_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size is not a multiple of the block size */
	data_length = 0x1015;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	uint64_t offset;
	uint32_t block_size;
	int rc;

	target = g_current_io_target;
	bdev = target->bdev;

	block_size = spdk_bdev_get_block_size(bdev);

	/* Set the start offset to the last block of the bdev, so that
	 * offset + nbytes == total size of the blockdev. */
	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);

	initialize_buffer(&tx_buf, 0xA3, block_size);
	initialize_buffer(&rx_buf, 0, block_size);

	blockdev_write(target, tx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	blockdev_read(target, rx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	data_length;
	uint64_t offset;
	int pattern;

	/* Tests the overflow condition of the blockdevs. */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	pattern = 0xA3;

	target = g_current_io_target;
	bdev = target->bdev;

	/* The start offset is valid, but offset + nbytes is greater than
	 * the total size of the blockdev, so both I/Os should fail. */
	offset = ((spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev)) - 1024);

	initialize_buffer(&tx_buf, pattern, data_length);
	initialize_buffer(&rx_buf, 0, data_length);

	blockdev_write(target, tx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	spdk_free(rx_buf);
	spdk_free(tx_buf);
}

static void
blockdev_write_read_max_offset(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	/* The start offset has been set to UINT64_MAX such that
	 * adding nbytes wraps around and points to an invalid address. */
	offset = UINT64_MAX;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_overlapped_write_read_8k(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 8K */
	data_length = 8192;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);

	/* Write a second 8K range with the pattern 0xBB at an offset that
	 * overlaps the range written above, then assert the new value in
	 * the overlapped address range */
	pattern = 0xBB;
	/* Offset = 4096 overlaps the second half of the previous 8K write */
	offset = 4096;
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
__blockdev_reset(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_test_reset(void)
{
	struct bdevio_request req;
	struct io_target *target;

	target = g_current_io_target;
	req.target = target;

	g_completion_success = false;

	execute_spdk_function(__blockdev_reset, &req);

	/* Workaround: NVMe-oF target doesn't support reset yet - so for now
	 *  don't fail the test if it's an NVMe bdev.
	 */
	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	}
}

struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;
	void *buf;
	uint32_t len;
	struct io_target *target;
	int sct;
	int sc;
	uint32_t cdw0;
};

static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_nvme_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
					&pt_req->cmd, pt_req->buf, pt_req->len,
					nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

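/* NVMe passthru read/write test. This bypasses the bdev read/write API and
 * issues raw NVM commands: per the NVMe spec, cdw10/cdw11 of a READ/WRITE
 * hold the 64-bit starting LBA and the low 16 bits of cdw12 hold the
 * zero-based number of logical blocks, so the commands below target LBA 4
 * for exactly one block. The sct/sc fields are pre-loaded with values a
 * successful completion would never return, so a missed completion callback
 * shows up as an assertion failure. */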
static void
blockdev_test_nvme_passthru_rw(void)
{
	struct bdevio_passthrough_request pt_req;
	void *write_buf, *read_buf;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
	pt_req.cmd.nsid = 1;
	*(uint64_t *)&pt_req.cmd.cdw10 = 4;	/* starting LBA spans cdw10/cdw11 */
	pt_req.cmd.cdw12 = 0;			/* NLB = 0, i.e. one block (zero-based) */

	pt_req.len = spdk_bdev_get_block_size(target->bdev);
	write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	CU_ASSERT_PTR_NOT_NULL_FATAL(write_buf);
	memset(write_buf, 0xA5, pt_req.len);
	pt_req.buf = write_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
	read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	CU_ASSERT_PTR_NOT_NULL_FATAL(read_buf);
	pt_req.buf = read_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
	spdk_free(read_buf);
	spdk_free(write_buf);
}

static void
blockdev_test_nvme_passthru_vendor_specific(void)
{
	struct bdevio_passthrough_request pt_req;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
	pt_req.cmd.nsid = 1;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	pt_req.cdw0 = 0xbeef;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(pt_req.cdw0 == 0x0);
}

static void
__blockdev_nvme_admin_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
					   &pt_req->cmd, pt_req->buf, pt_req->len,
					   nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

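/* Admin passthru test: issue Identify Controller (CNS = SPDK_NVME_IDENTIFY_CTRLR,
 * carried in cdw10) and expect a generic/successful completion. The
 * spdk_nvme_ctrlr_data payload must be DMA-safe, hence spdk_malloc(). */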
static void
blockdev_test_nvme_admin_passthru(void)
{
	struct io_target *target;
	struct bdevio_passthrough_request pt_req;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	pt_req.cmd.nsid = 0;
	*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;

	pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
	pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	CU_ASSERT_PTR_NOT_NULL_FATAL(pt_req.buf);

	pt_req.sct = SPDK_NVME_SCT_GENERIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	spdk_free(pt_req.buf);
}

static void
__stop_init_thread(void *arg)
{
	unsigned num_failures = g_num_failures;
	struct spdk_jsonrpc_request *request = arg;

	g_num_failures = 0;

	bdevio_cleanup_targets();
	if (g_wait_for_tests) {
		/* Do not stop the app yet, wait for another RPC */
		rpc_perform_tests_cb(num_failures, request);
		return;
	}
	spdk_app_stop(num_failures);
}

static void
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	g_num_failures = num_failures;

	spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
}

static int
suite_init(void)
{
	if (g_current_io_target == NULL) {
		g_current_io_target = g_io_targets;
	}
	return 0;
}

static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}

#define SUITE_NAME_MAX 64

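/* One CUnit suite is registered per target. CUnit runs suites in
 * registration order, and suite_init()/suite_fini() advance
 * g_current_io_target through the same list in that order, so every test in
 * a suite implicitly operates on "its" bdev via the global. */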
static int
__setup_ut_on_single_target(struct io_target *target)
{
	int rc = 0;
	CU_pSuite suite = NULL;
	char name[SUITE_NAME_MAX];

	snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
	suite = CU_add_suite(name, suite_init, suite_fini);
	if (suite == NULL) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}

	if (
		CU_add_test(suite, "blockdev write read 4k", blockdev_write_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3.5m", blockdev_write_zeroes_read_3m_500k) == NULL
		|| CU_add_test(suite, "blockdev reset",
			       blockdev_test_reset) == NULL
		|| CU_add_test(suite, "blockdev write read 512 bytes",
			       blockdev_write_read_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev write read size > 128k",
			       blockdev_write_read_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev write read invalid size",
			       blockdev_write_read_invalid_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read max offset",
			       blockdev_write_read_max_offset) == NULL
		|| CU_add_test(suite, "blockdev write read 8k on overlapped address offset",
			       blockdev_overlapped_write_read_8k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 4k", blockdev_writev_readv_4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 30 x 4k",
			       blockdev_writev_readv_30x4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 512 bytes",
			       blockdev_writev_readv_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k",
			       blockdev_writev_readv_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru rw",
			       blockdev_test_nvme_passthru_rw) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
			       blockdev_test_nvme_passthru_vendor_specific) == NULL
		|| CU_add_test(suite, "blockdev nvme admin passthru",
			       blockdev_test_nvme_admin_passthru) == NULL
	) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}
	return 0;
}

static void
__run_ut_thread(void *arg)
{
	struct spdk_jsonrpc_request *request = arg;
	int rc = 0;
	struct io_target *target;
	unsigned num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		/* CUnit error, probably won't recover */
		rc = CU_get_error();
		stop_init_thread(-rc, request);
		return;
	}

	target = g_io_targets;
	while (target != NULL) {
		rc = __setup_ut_on_single_target(target);
		if (rc < 0) {
			/* CUnit error, probably won't recover */
			stop_init_thread(-rc, request);
			return;
		}
		target = target->next;
	}
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	stop_init_thread(num_failures, request);
}

static void
__construct_targets(void *arg)
{
	if (bdevio_construct_targets() < 0) {
		spdk_app_stop(-1);
		return;
	}

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
}

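/* Application entry point, running on the SPDK init thread. The test needs
 * at least three cores (main() defaults reactor_mask to 0x7): the init
 * thread stays on the current core, while "ut_thread" (runs the CUnit
 * suites) and "io_thread" (submits all bdev I/O) are each pinned to a
 * distinct remaining core from the application core mask. */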
static void
test_main(void *arg1)
{
	struct spdk_cpuset *tmpmask, *appmask;
	uint32_t cpu, init_cpu;

	pthread_mutex_init(&g_test_mutex, NULL);
	pthread_cond_init(&g_test_cond, NULL);

	tmpmask = spdk_cpuset_alloc();
	if (tmpmask == NULL) {
		spdk_app_stop(-1);
		return;
	}

	appmask = spdk_app_get_core_mask();

	if (spdk_cpuset_count(appmask) < 3) {
		spdk_cpuset_free(tmpmask);
		spdk_app_stop(-1);
		return;
	}

	init_cpu = spdk_env_get_current_core();
	g_thread_init = spdk_get_thread();

	for (cpu = 0; cpu < SPDK_ENV_LCORE_ID_ANY; cpu++) {
		if (cpu != init_cpu && spdk_cpuset_get_cpu(appmask, cpu)) {
			spdk_cpuset_zero(tmpmask);
			spdk_cpuset_set_cpu(tmpmask, cpu, true);
			g_thread_ut = spdk_thread_create("ut_thread", tmpmask);
			break;
		}
	}

	if (cpu == SPDK_ENV_LCORE_ID_ANY) {
		spdk_cpuset_free(tmpmask);
		spdk_app_stop(-1);
		return;
	}

	for (cpu++; cpu < SPDK_ENV_LCORE_ID_ANY; cpu++) {
		if (cpu != init_cpu && spdk_cpuset_get_cpu(appmask, cpu)) {
			spdk_cpuset_zero(tmpmask);
			spdk_cpuset_set_cpu(tmpmask, cpu, true);
			g_thread_io = spdk_thread_create("io_thread", tmpmask);
			break;
		}
	}

	if (cpu == SPDK_ENV_LCORE_ID_ANY) {
		spdk_cpuset_free(tmpmask);
		spdk_app_stop(-1);
		return;
	}

	spdk_cpuset_free(tmpmask);

	if (g_wait_for_tests) {
		/* Do not perform any tests until RPC is received */
		return;
	}

	spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
}

static void
bdevio_usage(void)
{
	printf(" -w                        start bdevio app and wait for RPC to start the tests\n");
}

static int
bdevio_parse_arg(int ch, char *arg)
{
	switch (ch) {
	case 'w':
		g_wait_for_tests = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

struct rpc_perform_tests {
	char *name;
};

static void
free_rpc_perform_tests(struct rpc_perform_tests *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};

static void
rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	struct spdk_json_write_ctx *w;

	if (num_failures == 0) {
		w = spdk_jsonrpc_begin_result(request);
		spdk_json_write_uint32(w, num_failures);
		spdk_jsonrpc_end_result(request, w);
	} else {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						     "%u test cases failed", num_failures);
	}
}

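/* Handler for the "perform_tests" RPC (available when started with -w).
 * "name" is an optional bdev name; when omitted, targets are constructed
 * for every leaf bdev. An illustrative request ("Malloc0" is just a
 * placeholder bdev name):
 *
 *   {"jsonrpc": "2.0", "id": 1, "method": "perform_tests",
 *    "params": {"name": "Malloc0"}}
 *
 * On success the response result is the number of failures (0); otherwise a
 * JSON-RPC error carries the failure count or reason. */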
static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	struct spdk_bdev *bdev;
	int rc;

	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		bdev = spdk_bdev_get_by_name(req.name);
		if (bdev == NULL) {
			SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Bdev '%s' does not exist: %s",
							     req.name, spdk_strerror(ENODEV));
			goto invalid;
		}
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     spdk_bdev_get_name(bdev), spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);

	return;

invalid:
	free_rpc_perform_tests(&req);
}
SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)

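/* Typical invocations (the config/RPC plumbing comes from the standard SPDK
 * app framework, so the usual app options apply):
 *
 *   ./bdevio <app options>       run the tests immediately on all leaf bdevs
 *   ./bdevio -w <app options>    start the app, then trigger the run via the
 *                                "perform_tests" RPC
 */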
int
main(int argc, char **argv)
{
	int			rc;
	struct spdk_app_opts	opts = {};

	spdk_app_opts_init(&opts);
	opts.name = "bdevio";
	opts.reactor_mask = "0x7";

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
				      bdevio_parse_arg, bdevio_usage)) !=
	    SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	rc = spdk_app_start(&opts, test_main, NULL);
	spdk_app_fini();

	return rc;
}