xref: /spdk/test/bdev/bdevio/bdevio.c (revision ae7b5890ef728af40bd233a5011b924c482603bf)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/copy_engine.h"
#include "spdk/env.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/event.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "CUnit/Basic.h"

#define BUFFER_IOVS		1024
#define BUFFER_SIZE		(260 * 1024)
#define BDEV_TASK_ARRAY_SIZE	2048

pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

static uint32_t g_lcore_id_init;
static uint32_t g_lcore_id_ut;
static uint32_t g_lcore_id_io;
static bool g_wait_for_tests = false;
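
/*
 * The app runs on three lcores: the init lcore handles app start/stop, the
 * ut lcore runs the CUnit tests, and the io lcore submits the actual bdev
 * I/O. The roles are assigned in test_main() from the reactor mask.
 */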

struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;
	struct io_target	*next;
};

struct bdevio_request {
	char *buf;
	int data_len;
	uint64_t offset;
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;
	struct io_target *target;
};

struct io_target *g_io_targets = NULL;
struct io_target *g_current_io_target = NULL;
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);

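/*
 * Run fn on the io lcore and block the calling (ut) thread until the
 * operation signals completion via wake_ut_thread(). The mutex is taken
 * before the event is fired, so the wakeup cannot be missed.
 */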
static void
execute_spdk_function(spdk_event_fn fn, void *arg1, void *arg2)
{
	struct spdk_event *event;

	event = spdk_event_allocate(g_lcore_id_io, fn, arg1, arg2);
	pthread_mutex_lock(&g_test_mutex);
	spdk_event_call(event);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
__get_io_channel(void *arg1, void *arg2)
{
	struct io_target *target = arg1;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}

static int
bdevio_construct_target(struct spdk_bdev *bdev)
{
	struct io_target *target;
	int rc;
	uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	target = malloc(sizeof(struct io_target));
	if (target == NULL) {
		return -ENOMEM;
	}

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &target->bdev_desc);
	if (rc != 0) {
		free(target);
		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
		return rc;
	}

	printf("  %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
	       spdk_bdev_get_name(bdev),
	       num_blocks, block_size,
	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

	target->bdev = bdev;
	target->next = g_io_targets;
	execute_spdk_function(__get_io_channel, target, NULL);
	g_io_targets = target;

	return 0;
}

static int
bdevio_construct_targets(void)
{
	struct spdk_bdev *bdev;
	int rc;

	printf("I/O targets:\n");

	bdev = spdk_bdev_first_leaf();
	while (bdev != NULL) {
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
			return rc;
		}
		bdev = spdk_bdev_next_leaf(bdev);
	}

	if (g_io_targets == NULL) {
		SPDK_ERRLOG("No bdevs to perform tests on\n");
		return -1;
	}

	return 0;
}

static void
__put_io_channel(void *arg1, void *arg2)
{
	struct io_target *target = arg1;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}

static void
bdevio_cleanup_targets(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		execute_spdk_function(__put_io_channel, target, NULL);
		spdk_bdev_close(target->bdev_desc);
		g_io_targets = target->next;
		free(target);
		target = g_io_targets;
	}
}

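/* Completion status of the most recent I/O, set on the io lcore by
 * quick_test_complete() and read by the ut thread once
 * execute_spdk_function() has returned. */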
static bool g_completion_success;

static void
initialize_buffer(char **buf, int pattern, int size)
{
	*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	assert(*buf != NULL);
	memset(*buf, pattern, size);
}

static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_write(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_write_zeroes(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

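/*
 * Chop req->buf into at most BUFFER_IOVS iovecs of iov_len bytes each (the
 * final iovec takes whatever remains). iov_len == 0 leaves iovcnt at 0,
 * which makes the submit paths fall back to the non-vectored read/write
 * calls.
 */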
static void
sgl_chop_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->buf;

	req->iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->iov[req->iovcnt].iov_base = buf;
		req->iov[req->iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
blockdev_write(struct io_target *target, char *tx_buf,
	       uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_write, &req, NULL);
}

static void
blockdev_write_zeroes(struct io_target *target, char *tx_buf,
		      uint64_t offset, int data_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;

	g_completion_success = false;

	execute_spdk_function(__blockdev_write_zeroes, &req, NULL);
}

static void
__blockdev_read(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_read(struct io_target *target, char *rx_buf,
	      uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = rx_buf;
	req.data_len = data_len;
	req.offset = offset;
	req.iovcnt = 0;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_read, &req, NULL);
}

static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int rc;

	rc = memcmp(rx_buf, tx_buf, data_length);

	spdk_free(rx_buf);
	spdk_free(tx_buf);

	return rc;
}

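/*
 * Core write/read/verify helper: write data_length bytes of pattern (or
 * zeroes) at offset, read the same range back, and compare the buffers.
 * expected_rc == 0 means both I/Os must succeed; any other value means both
 * must fail. Requests smaller than one block or larger than the bdev are
 * silently skipped.
 */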
static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes)
{
	struct io_target *target;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;

	target = g_current_io_target;

	if (data_length < spdk_bdev_get_block_size(target->bdev) ||
	    data_length / spdk_bdev_get_block_size(target->bdev) > spdk_bdev_get_num_blocks(target->bdev)) {
		return;
	}

	if (!write_zeroes) {
		initialize_buffer(&tx_buf, pattern, data_length);
		initialize_buffer(&rx_buf, 0, data_length);

		blockdev_write(target, tx_buf, offset, data_length, iov_len);
	} else {
		initialize_buffer(&tx_buf, 0, data_length);
		initialize_buffer(&rx_buf, pattern, data_length);

		blockdev_write_zeroes(target, tx_buf, offset, data_length);
	}

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}
	blockdev_read(target, rx_buf, offset, data_length, iov_len);

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	if (g_completion_success) {
		rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
		/* Assert the write by comparing it with values read
		 * from each blockdev */
		CU_ASSERT_EQUAL(rc, 0);
	} else {
		/* blockdev_write_read_data_match() frees the buffers on the
		 * success path; free them here so they do not leak. */
		spdk_free(rx_buf);
		spdk_free(tx_buf);
	}
}

static void
blockdev_write_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_zeroes_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will not have to split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_1m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 1M */
	data_length = 1048576;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_3m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3M */
	data_length = 3145728;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev. It also
 * tests a write size that is not an even multiple of
 * the bdev layer zero buffer size.
 */
static void
blockdev_write_zeroes_read_3m_500k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3.5M */
	data_length = 3670016;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

static void
blockdev_writev_readv_4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_30x4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 30 x 4K = 120K */
	data_length = 4096 * 30;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_512Bytes(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_512Bytes(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	iov_len = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_size_gt_128k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k_two_iov(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 128 * 1024;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_invalid_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size is not a multiple of the block size */
	data_length = 0x1015;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	uint64_t offset;
	uint32_t block_size;
	int rc;

	target = g_current_io_target;
	bdev = target->bdev;

	block_size = spdk_bdev_get_block_size(bdev);

	/* The start offset has been set to a marginal value
	 * such that offset + nbytes == Total size of
	 * blockdev. */
	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);

	initialize_buffer(&tx_buf, 0xA3, block_size);
	initialize_buffer(&rx_buf, 0, block_size);

	blockdev_write(target, tx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	blockdev_read(target, rx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	data_length;
	uint64_t offset;
	int pattern;

	/* Tests the overflow condition of the blockdevs. */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	pattern = 0xA3;

	target = g_current_io_target;
	bdev = target->bdev;

	/* The start offset has been set to a valid value
	 * but offset + nbytes is greater than the Total size
	 * of the blockdev. The test should fail. */
	offset = ((spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev)) - 1024);

	initialize_buffer(&tx_buf, pattern, data_length);
	initialize_buffer(&rx_buf, 0, data_length);

	blockdev_write(target, tx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	/* Both I/Os are expected to fail, so no helper frees these buffers;
	 * free them here to avoid leaking them on every run. */
	spdk_free(rx_buf);
	spdk_free(tx_buf);
}

static void
blockdev_write_read_max_offset(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	/* The start offset has been set to UINT64_MAX such that
	 * adding nbytes wraps around and points to an invalid address. */
	offset = UINT64_MAX;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_overlapped_write_read_8k(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 8K */
	data_length = 8192;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);

	/* Overwrite the pattern 0xbb of size 8K on an address offset overlapping
	 * with the address written above and assert the new value in
	 * the overlapped address range */
	/* Populate 8k with value 0xBB */
	pattern = 0xBB;
	/* Offset = 4096; overlaps the second half of the 0xA3 write above */
	offset = 4096;
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
__blockdev_reset(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_test_reset(void)
{
	struct bdevio_request req;
	struct io_target *target;

	target = g_current_io_target;
	req.target = target;

	g_completion_success = false;

	execute_spdk_function(__blockdev_reset, &req, NULL);

	/* Workaround: NVMe-oF target doesn't support reset yet - so for now
	 *  don't fail the test if it's an NVMe bdev.
	 */
	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	}
}

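/*
 * NVMe passthrough plumbing: the request carries a raw NVMe command plus
 * the completion status (sct/sc) reported back via nvme_pt_test_complete().
 * Callers pre-set sct/sc before submission; the completion callback
 * overwrites them with the actual status, so a request that never completes
 * is not mistaken for a success.
 */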
struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;
	void *buf;
	uint32_t len;
	struct io_target *target;
	int sct;
	int sc;
};

static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_nvme_passthru(void *arg1, void *arg2)
{
	struct bdevio_passthrough_request *pt_req = arg1;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
					&pt_req->cmd, pt_req->buf, pt_req->len,
					nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_test_nvme_passthru_rw(void)
{
	struct bdevio_passthrough_request pt_req;
	void *write_buf, *read_buf;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
	pt_req.cmd.nsid = 1;
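	/* cdw10/cdw11 hold the 64-bit starting LBA of an NVMe read/write;
	 * write one block at LBA 4 (the cdw12 NLB field is 0-based, so
	 * 0 == 1 block). */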
	*(uint64_t *)&pt_req.cmd.cdw10 = 4;
	pt_req.cmd.cdw12 = 0;

	pt_req.len = spdk_bdev_get_block_size(target->bdev);
	write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	memset(write_buf, 0xA5, pt_req.len);
	pt_req.buf = write_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req, NULL);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
	read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	pt_req.buf = read_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req, NULL);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
	spdk_free(read_buf);
	spdk_free(write_buf);
}

static void
blockdev_test_nvme_passthru_vendor_specific(void)
{
	struct bdevio_passthrough_request pt_req;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
	pt_req.cmd.nsid = 1;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req, NULL);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

static void
__blockdev_nvme_admin_passthru(void *arg1, void *arg2)
{
	struct bdevio_passthrough_request *pt_req = arg1;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
					   &pt_req->cmd, pt_req->buf, pt_req->len,
					   nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_test_nvme_admin_passthru(void)
{
	struct io_target *target;
	struct bdevio_passthrough_request pt_req;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	pt_req.cmd.nsid = 0;
	*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;

	pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
	pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);

	pt_req.sct = SPDK_NVME_SCT_GENERIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req, NULL);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
	spdk_free(pt_req.buf);
}

static void
__stop_init_thread(void *arg1, void *arg2)
{
	unsigned num_failures = (unsigned)(uintptr_t)arg1;
	struct spdk_jsonrpc_request *request = arg2;

	bdevio_cleanup_targets();
	if (g_wait_for_tests) {
		/* Do not stop the app yet, wait for another RPC */
		rpc_perform_tests_cb(num_failures, request);
		return;
	}
	spdk_app_stop(num_failures);
}

static void
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	struct spdk_event *event;

	event = spdk_event_allocate(g_lcore_id_init, __stop_init_thread,
				    (void *)(uintptr_t)num_failures, request);
	spdk_event_call(event);
}

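/*
 * One CUnit suite is registered per target. suite_init() points
 * g_current_io_target at the first target before the first suite runs, and
 * suite_fini() advances it, so the suites walk the target list in order.
 */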
static int
suite_init(void)
{
	if (g_current_io_target == NULL) {
		g_current_io_target = g_io_targets;
	}
	return 0;
}

static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}

#define SUITE_NAME_MAX 64

static int
__setup_ut_on_single_target(struct io_target *target)
{
	int rc = 0;
	CU_pSuite suite = NULL;
	char name[SUITE_NAME_MAX];

	snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
	suite = CU_add_suite(name, suite_init, suite_fini);
	if (suite == NULL) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}

	if (
		CU_add_test(suite, "blockdev write read 4k", blockdev_write_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3.5m", blockdev_write_zeroes_read_3m_500k) == NULL
		|| CU_add_test(suite, "blockdev reset",
			       blockdev_test_reset) == NULL
		|| CU_add_test(suite, "blockdev write read 512 bytes",
			       blockdev_write_read_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev write read size > 128k",
			       blockdev_write_read_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev write read invalid size",
			       blockdev_write_read_invalid_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read max offset",
			       blockdev_write_read_max_offset) == NULL
		|| CU_add_test(suite, "blockdev write read 8k on overlapped address offset",
			       blockdev_overlapped_write_read_8k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 4k", blockdev_writev_readv_4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 30 x 4k",
			       blockdev_writev_readv_30x4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 512 bytes",
			       blockdev_writev_readv_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k",
			       blockdev_writev_readv_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru rw",
			       blockdev_test_nvme_passthru_rw) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
			       blockdev_test_nvme_passthru_vendor_specific) == NULL
		|| CU_add_test(suite, "blockdev nvme admin passthru",
			       blockdev_test_nvme_admin_passthru) == NULL
	) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}
	return 0;
}

static void
__run_ut_thread(void *arg1, void *arg2)
{
	struct spdk_jsonrpc_request *request = arg2;
	int rc = 0;
	struct io_target *target;
	unsigned num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		/* CUnit error, probably won't recover */
		rc = CU_get_error();
		stop_init_thread(-rc, request);
		return;
	}

	target = g_io_targets;
	while (target != NULL) {
		rc = __setup_ut_on_single_target(target);
		if (rc < 0) {
			/* CUnit error, probably won't recover */
			stop_init_thread(-rc, request);
			return;
		}
		target = target->next;
	}
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	stop_init_thread(num_failures, request);
}

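/*
 * App entry point on the init lcore: carve the first three cores out of the
 * reactor mask (see the 0x7 default in main()), then either wait for the
 * perform_tests RPC (-w) or build targets and kick off the ut thread.
 */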
static void
test_main(void *arg1)
{
	struct spdk_event *event;

	pthread_mutex_init(&g_test_mutex, NULL);
	pthread_cond_init(&g_test_cond, NULL);

	g_lcore_id_init = spdk_env_get_first_core();
	g_lcore_id_ut = spdk_env_get_next_core(g_lcore_id_init);
	g_lcore_id_io = spdk_env_get_next_core(g_lcore_id_ut);

	if (g_lcore_id_init == SPDK_ENV_LCORE_ID_ANY ||
	    g_lcore_id_ut == SPDK_ENV_LCORE_ID_ANY ||
	    g_lcore_id_io == SPDK_ENV_LCORE_ID_ANY) {
		SPDK_ERRLOG("Could not reserve 3 separate lcores.\n");
		spdk_app_stop(-1);
		return;
	}

	if (g_wait_for_tests) {
		/* Do not perform any tests until RPC is received */
		return;
	}

	if (bdevio_construct_targets() < 0) {
		spdk_app_stop(-1);
		return;
	}

	event = spdk_event_allocate(g_lcore_id_ut, __run_ut_thread, NULL, NULL);
	spdk_event_call(event);
}

static void
bdevio_usage(void)
{
	printf(" -w                        start bdevio app and wait for RPC to start the tests\n");
}
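
/*
 * Illustrative invocations (the config file name is an example): run the
 * tests immediately against the bdevs from a config, or start idle with -w
 * and drive the run over JSON-RPC:
 *
 *   ./bdevio -c bdev.conf
 *   ./bdevio -w
 */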

static int
bdevio_parse_arg(int ch, char *arg)
{
	switch (ch) {
	case 'w':
		g_wait_for_tests = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

struct rpc_perform_tests {
	char *name;
};

static void
free_rpc_perform_tests(struct rpc_perform_tests *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};

static void
rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	struct spdk_json_write_ctx *w;

	if (num_failures == 0) {
		w = spdk_jsonrpc_begin_result(request);
		if (w == NULL) {
			return;
		}
		spdk_json_write_uint32(w, num_failures);
		spdk_jsonrpc_end_result(request, w);
	} else {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						     "%u test cases failed", num_failures);
	}
}

static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	struct spdk_event *event;
	struct spdk_bdev *bdev;
	int rc;

	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		bdev = spdk_bdev_get_by_name(req.name);
		if (bdev == NULL) {
			SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Bdev '%s' does not exist: %s",
							     req.name, spdk_strerror(ENODEV));
			goto invalid;
		}
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     spdk_bdev_get_name(bdev), spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	event = spdk_event_allocate(g_lcore_id_ut, __run_ut_thread, NULL, request);
	spdk_event_call(event);

	return;

invalid:
	free_rpc_perform_tests(&req);
}
SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)
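
/*
 * Illustrative JSON-RPC exchange (the bdev name is an example); "name" is
 * optional and defaults to testing all leaf bdevs:
 *
 *   -> {"jsonrpc": "2.0", "id": 1, "method": "perform_tests",
 *       "params": {"name": "Malloc0"}}
 *   <- {"jsonrpc": "2.0", "id": 1, "result": 0}
 *
 * A non-zero failure count is reported as a JSON-RPC error instead.
 */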

int
main(int argc, char **argv)
{
	int			rc;
	struct spdk_app_opts	opts = {};

	spdk_app_opts_init(&opts);
	opts.name = "bdevio";
	opts.reactor_mask = "0x7";

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
				      bdevio_parse_arg, bdevio_usage)) !=
	    SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	rc = spdk_app_start(&opts, test_main, NULL);
	spdk_app_fini();

	return rc;
}