/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/accel_engine.h"
#include "spdk/env.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/event.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "CUnit/Basic.h"

#define BUFFER_IOVS		1024
#define BUFFER_SIZE		(260 * 1024)
#define BDEV_TASK_ARRAY_SIZE	2048

pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

static struct spdk_thread *g_thread_init;
static struct spdk_thread *g_thread_ut;
static struct spdk_thread *g_thread_io;
static bool g_wait_for_tests = false;
static int g_num_failures = 0;
static bool g_shutdown = false;

struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;
	struct io_target	*next;
};

struct bdevio_request {
	char *buf;
	char *fused_buf;
	int data_len;
	uint64_t offset;
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;
	struct iovec fused_iov[BUFFER_IOVS];
	int fused_iovcnt;
	struct io_target *target;
};

struct io_target *g_io_targets = NULL;
struct io_target *g_current_io_target = NULL;
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);

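/*
 * Test cases run on g_thread_ut, but all bdev I/O is issued from
 * g_thread_io.  execute_spdk_function() sends a message to the I/O thread
 * and blocks the calling test on g_test_cond until the operation's
 * completion callback invokes wake_ut_thread().
 */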
static void
execute_spdk_function(spdk_msg_fn fn, void *arg)
{
	pthread_mutex_lock(&g_test_mutex);
	spdk_thread_send_msg(g_thread_io, fn, arg);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
__get_io_channel(void *arg)
{
	struct io_target *target = arg;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}

static void
bdevio_construct_target_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				void *event_ctx)
{
	/* Required by spdk_bdev_open_ext(); bdev events are intentionally ignored here. */
}

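/*
 * Build one io_target: open a descriptor for the bdev, then hop to the I/O
 * thread to acquire an spdk_io_channel, since channels are per-thread
 * objects and must be obtained on the thread that will use them.
 */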
static int
bdevio_construct_target(struct spdk_bdev *bdev)
{
	struct io_target *target;
	int rc;
	uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	target = malloc(sizeof(struct io_target));
	if (target == NULL) {
		return -ENOMEM;
	}

	rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), true, bdevio_construct_target_open_cb, NULL,
				&target->bdev_desc);
	if (rc != 0) {
		free(target);
		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
		return rc;
	}

	printf("  %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
	       spdk_bdev_get_name(bdev),
	       num_blocks, block_size,
	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

	target->bdev = bdev;
	target->next = g_io_targets;
	execute_spdk_function(__get_io_channel, target);
	g_io_targets = target;

	return 0;
}

static int
bdevio_construct_targets(void)
{
	struct spdk_bdev *bdev;
	int rc;

	printf("I/O targets:\n");

	bdev = spdk_bdev_first_leaf();
	while (bdev != NULL) {
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
			return rc;
		}
		bdev = spdk_bdev_next_leaf(bdev);
	}

	if (g_io_targets == NULL) {
		SPDK_ERRLOG("No bdevs to perform tests on\n");
		return -1;
	}

	return 0;
}

static void
__put_io_channel(void *arg)
{
	struct io_target *target = arg;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}

static void
bdevio_cleanup_targets(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		execute_spdk_function(__put_io_channel, target);
		spdk_bdev_close(target->bdev_desc);
		g_io_targets = target->next;
		free(target);
		target = g_io_targets;
	}
}

static bool g_completion_success;

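/*
 * Buffers are allocated with spdk_zmalloc() at 4 KiB alignment with the
 * SPDK_MALLOC_DMA flag, so they are safe to hand to bdevs backed by
 * physical devices that DMA directly to and from host memory.
 */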
static void
initialize_buffer(char **buf, int pattern, int size)
{
	*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	CU_ASSERT_PTR_NOT_NULL_FATAL(*buf);
	memset(*buf, pattern, size);
}

static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_write_zeroes(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_compare_and_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt,
			req->fused_iov, req->fused_iovcnt, req->offset, req->data_len, quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

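/*
 * Split req->buf into up to BUFFER_IOVS iovec entries of iov_len bytes each;
 * the final entry receives any remainder.  An iov_len of 0 leaves iovcnt at
 * 0, which steers the request down the single-buffer (non-vectored) path.
 */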
static void
sgl_chop_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->buf;

	req->iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->iov[req->iovcnt].iov_base = buf;
		req->iov[req->iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->fused_buf;

	req->fused_iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->fused_iov[req->fused_iovcnt].iov_base = buf;
		req->fused_iov[req->fused_iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
blockdev_write(struct io_target *target, char *tx_buf,
	       uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_write, &req);
}

static void
_blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf,
			    uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = cmp_buf;
	req.fused_buf = write_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);
	sgl_chop_fused_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_compare_and_write, &req);
}

static void
blockdev_write_zeroes(struct io_target *target, char *tx_buf,
		      uint64_t offset, int data_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;

	g_completion_success = false;

	execute_spdk_function(__blockdev_write_zeroes, &req);
}

static void
__blockdev_read(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_read(struct io_target *target, char *rx_buf,
	      uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = rx_buf;
	req.data_len = data_len;
	req.offset = offset;
	req.iovcnt = 0;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_read, &req);
}

static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int rc;
	rc = memcmp(rx_buf, tx_buf, data_length);

	spdk_free(rx_buf);
	spdk_free(tx_buf);

	return rc;
}

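/*
 * A request size is testable only if it is at least one block long, a whole
 * multiple of the block size, and no larger than the bdev itself.
 */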
static bool
blockdev_io_valid_blocks(struct spdk_bdev *bdev, uint64_t data_length)
{
	if (data_length < spdk_bdev_get_block_size(bdev) ||
	    data_length % spdk_bdev_get_block_size(bdev) ||
	    data_length / spdk_bdev_get_block_size(bdev) > spdk_bdev_get_num_blocks(bdev)) {
		return false;
	}

	return true;
}

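/*
 * Core write/read verification: write a pattern (or write_zeroes) at
 * 'offset', read it back, and memcmp the two buffers.  expected_rc encodes
 * whether both completions are expected to succeed (0) or fail (< 0).
 */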
static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes)
{
	struct io_target *target;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;

	target = g_current_io_target;

	if (!blockdev_io_valid_blocks(target->bdev, data_length)) {
		return;
	}

	if (!write_zeroes) {
		initialize_buffer(&tx_buf, pattern, data_length);
		initialize_buffer(&rx_buf, 0, data_length);

		blockdev_write(target, tx_buf, offset, data_length, iov_len);
	} else {
		initialize_buffer(&tx_buf, 0, data_length);
		initialize_buffer(&rx_buf, pattern, data_length);

		blockdev_write_zeroes(target, tx_buf, offset, data_length);
	}

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}
	blockdev_read(target, rx_buf, offset, data_length, iov_len);

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	if (g_completion_success) {
		/* Verify the write by comparing it with the data read back
		 * from the same blockdev */
		rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
		CU_ASSERT_EQUAL(rc, 0);
	}
}

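/*
 * Compare-and-write is a fused operation: the write executes only if the
 * compare matches the current on-disk data.  The first call below matches
 * the 0xAA pattern and commits 0xBB; the second reuses the now-stale 0xAA
 * compare buffer, so it is expected to fail with a miscompare.
 */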
static void
blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset)
{
	struct io_target *target;
	char	*tx_buf = NULL;
	char	*write_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;

	target = g_current_io_target;

	if (!blockdev_io_valid_blocks(target->bdev, data_length)) {
		return;
	}

	initialize_buffer(&tx_buf, 0xAA, data_length);
	initialize_buffer(&rx_buf, 0, data_length);
	initialize_buffer(&write_buf, 0xBB, data_length);

	blockdev_write(target, tx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	/* Verify the fused write by comparing it with the data read back
	 * from the same blockdev */
	rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length);
	CU_ASSERT_EQUAL(rc, 0);
}

static void
blockdev_write_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_zeroes_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This I/O will not need to be split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_1m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 1M */
	data_length = 1048576;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This I/O will need to be split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_3m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3M */
	data_length = 3145728;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This I/O will need to be split at the bdev layer if
 * write-zeroes is not supported by the bdev.  It also
 * tests a write size that is not an even multiple of
 * the bdev layer's zero buffer size.
 */
static void
blockdev_write_zeroes_read_3m_500k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3.5M */
	data_length = 3670016;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

static void
blockdev_writev_readv_4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_comparev_and_writev(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;

	data_length = 1;
	iov_len = 1;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;

	blockdev_compare_and_write(data_length, iov_len, offset);
}

static void
blockdev_writev_readv_30x4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 30 * 4K = 120K */
	data_length = 4096 * 30;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_512Bytes(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_512Bytes(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	iov_len = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_size_gt_128k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k_two_iov(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 128 * 1024;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_invalid_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size is not a multiple of the block size */
	data_length = 0x1015;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	uint64_t offset;
	uint32_t block_size;
	int rc;

	target = g_current_io_target;
	bdev = target->bdev;

	block_size = spdk_bdev_get_block_size(bdev);

	/* The start offset has been set to a marginal value
	 * such that offset + nbytes == total size of the
	 * blockdev. */
	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);

	initialize_buffer(&tx_buf, 0xA3, block_size);
	initialize_buffer(&rx_buf, 0, block_size);

	blockdev_write(target, tx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	blockdev_read(target, rx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	/* Verify the write by comparing it with the data read back
	 * from the same blockdev */
	rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
	CU_ASSERT_EQUAL(rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	data_length;
	uint64_t offset;
	int pattern;

	/* Tests the overflow condition of the blockdevs. */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	pattern = 0xA3;

	target = g_current_io_target;
	bdev = target->bdev;

	/* The start offset has been set to a valid value
	 * but offset + nbytes is greater than the total size
	 * of the blockdev. The test should fail. */
	offset = ((spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev)) - 1024);

	initialize_buffer(&tx_buf, pattern, data_length);
	initialize_buffer(&rx_buf, 0, data_length);

	blockdev_write(target, tx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);
}

static void
blockdev_write_read_max_offset(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	/* The start offset has been set to UINT64_MAX such that
	 * adding nbytes wraps around and points to an invalid address. */
	offset = UINT64_MAX;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_overlapped_write_read_8k(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 8K */
	data_length = 8192;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);

	/* Overwrite with the pattern 0xBB, again 8K in size, at an offset
	 * overlapping the range written above, and assert the new value in
	 * the overlapped address range */
	/* Populate 8k with value 0xBB */
	pattern = 0xBB;
	/* Offset = 4096; overlap the previous range and write value 0xBB */
	offset = 4096;
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
__blockdev_reset(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_test_reset(void)
{
	struct bdevio_request req;
	struct io_target *target;

	target = g_current_io_target;
	req.target = target;

	g_completion_success = false;

	execute_spdk_function(__blockdev_reset, &req);

	/* Workaround: NVMe-oF target doesn't support reset yet - so for now
	 *  don't fail the test if it's an NVMe bdev.
	 */
	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	}
}

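/*
 * Carries one raw NVMe command through the passthru path.  Each test
 * preloads sct/sc with sentinel values; nvme_pt_test_complete() overwrites
 * them with the actual NVMe status code type and status code.
 */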
struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;
	void *buf;
	uint32_t len;
	struct io_target *target;
	int sct;
	int sc;
	uint32_t cdw0;
};

static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_nvme_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
					&pt_req->cmd, pt_req->buf, pt_req->len,
					nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_test_nvme_passthru_rw(void)
{
	struct bdevio_passthrough_request pt_req;
	void *write_buf, *read_buf;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
	pt_req.cmd.nsid = 1;
	/* cdw10/cdw11 together hold the 64-bit starting LBA for NVMe
	 * read/write commands; cdw12 bits 15:0 are the 0-based number of
	 * logical blocks, so 0 means a single block. */
	*(uint64_t *)&pt_req.cmd.cdw10 = 4;
	pt_req.cmd.cdw12 = 0;

	pt_req.len = spdk_bdev_get_block_size(target->bdev);
	write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	memset(write_buf, 0xA5, pt_req.len);
	pt_req.buf = write_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
	read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	pt_req.buf = read_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
	spdk_free(read_buf);
	spdk_free(write_buf);
}

static void
blockdev_test_nvme_passthru_vendor_specific(void)
{
	struct bdevio_passthrough_request pt_req;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
	pt_req.cmd.nsid = 1;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	pt_req.cdw0 = 0xbeef;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(pt_req.cdw0 == 0x0);
}

static void
__blockdev_nvme_admin_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
					   &pt_req->cmd, pt_req->buf, pt_req->len,
					   nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_test_nvme_admin_passthru(void)
{
	struct io_target *target;
	struct bdevio_passthrough_request pt_req;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	pt_req.cmd.nsid = 0;
	*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;

	pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
	pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);

	pt_req.sct = SPDK_NVME_SCT_GENERIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
}

static void
__stop_init_thread(void *arg)
{
	unsigned num_failures = g_num_failures;
	struct spdk_jsonrpc_request *request = arg;

	g_num_failures = 0;

	bdevio_cleanup_targets();
	if (g_wait_for_tests && !g_shutdown) {
		/* Do not stop the app yet, wait for another RPC */
		rpc_perform_tests_cb(num_failures, request);
		return;
	}
	spdk_app_stop(num_failures);
}

static void
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	g_num_failures = num_failures;

	spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
}

static int
suite_init(void)
{
	if (g_current_io_target == NULL) {
		g_current_io_target = g_io_targets;
	}
	return 0;
}

static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}

#define SUITE_NAME_MAX 64

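/*
 * One CUnit suite is registered per target.  Suites run in registration
 * order, and suite_init()/suite_fini() advance g_current_io_target through
 * the target list in that same order, so each suite tests "its" bdev.
 */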
static int
__setup_ut_on_single_target(struct io_target *target)
{
	int rc = 0;
	CU_pSuite suite = NULL;
	char name[SUITE_NAME_MAX];

	snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
	suite = CU_add_suite(name, suite_init, suite_fini);
	if (suite == NULL) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}

	if (
		CU_add_test(suite, "blockdev write read 4k", blockdev_write_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3.5m", blockdev_write_zeroes_read_3m_500k) == NULL
		|| CU_add_test(suite, "blockdev reset",
			       blockdev_test_reset) == NULL
		|| CU_add_test(suite, "blockdev write read 512 bytes",
			       blockdev_write_read_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev write read size > 128k",
			       blockdev_write_read_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev write read invalid size",
			       blockdev_write_read_invalid_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read max offset",
			       blockdev_write_read_max_offset) == NULL
		|| CU_add_test(suite, "blockdev write read 8k on overlapped address offset",
			       blockdev_overlapped_write_read_8k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 4k", blockdev_writev_readv_4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 30 x 4k",
			       blockdev_writev_readv_30x4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 512 bytes",
			       blockdev_writev_readv_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k",
			       blockdev_writev_readv_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
		|| CU_add_test(suite, "blockdev comparev and writev", blockdev_comparev_and_writev) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru rw",
			       blockdev_test_nvme_passthru_rw) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
			       blockdev_test_nvme_passthru_vendor_specific) == NULL
		|| CU_add_test(suite, "blockdev nvme admin passthru",
			       blockdev_test_nvme_admin_passthru) == NULL
	) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}
	return 0;
}

static void
__run_ut_thread(void *arg)
{
	struct spdk_jsonrpc_request *request = arg;
	int rc = 0;
	struct io_target *target;
	unsigned num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		/* CUnit error, probably won't recover */
		rc = CU_get_error();
		stop_init_thread(-rc, request);
		return;
	}

	target = g_io_targets;
	while (target != NULL) {
		rc = __setup_ut_on_single_target(target);
		if (rc < 0) {
			/* CUnit error, probably won't recover */
			stop_init_thread(-rc, request);
			return;
		}
		target = target->next;
	}
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	stop_init_thread(num_failures, request);
}

static void
__construct_targets(void *arg)
{
	if (bdevio_construct_targets() < 0) {
		spdk_app_stop(-1);
		return;
	}

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
}

static void
test_main(void *arg1)
{
	struct spdk_cpuset tmpmask = {};
	uint32_t i;

	pthread_mutex_init(&g_test_mutex, NULL);
	pthread_cond_init(&g_test_cond, NULL);

	/* This test requires at least three cores.
	 * g_thread_init is the app thread on the main core from the event
	 * framework.  The next two threads are used only by the tests and
	 * should always be placed on separate CPU cores. */
	if (spdk_env_get_core_count() < 3) {
		spdk_app_stop(-1);
		return;
	}

	SPDK_ENV_FOREACH_CORE(i) {
		if (i == spdk_env_get_current_core()) {
			g_thread_init = spdk_get_thread();
			continue;
		}
		spdk_cpuset_zero(&tmpmask);
		spdk_cpuset_set_cpu(&tmpmask, i, true);
		if (g_thread_ut == NULL) {
			g_thread_ut = spdk_thread_create("ut_thread", &tmpmask);
		} else if (g_thread_io == NULL) {
			g_thread_io = spdk_thread_create("io_thread", &tmpmask);
		}
	}

	if (g_wait_for_tests) {
		/* Do not perform any tests until RPC is received */
		return;
	}

	spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
}

static void
bdevio_usage(void)
{
	printf(" -w                        start bdevio app and wait for RPC to start the tests\n");
}

static int
bdevio_parse_arg(int ch, char *arg)
{
	switch (ch) {
	case 'w':
		g_wait_for_tests = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

struct rpc_perform_tests {
	char *name;
};

static void
free_rpc_perform_tests(struct rpc_perform_tests *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};

static void
rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	struct spdk_json_write_ctx *w;

	if (num_failures == 0) {
		w = spdk_jsonrpc_begin_result(request);
		spdk_json_write_uint32(w, num_failures);
		spdk_jsonrpc_end_result(request, w);
	} else {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						     "%u test cases failed", num_failures);
	}
}

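/*
 * With -w, the app idles until a "perform_tests" JSON-RPC request arrives,
 * for example (the bdev name is optional and purely illustrative here):
 *
 *   {"jsonrpc": "2.0", "id": 1, "method": "perform_tests",
 *    "params": {"name": "Malloc0"}}
 *
 * Omitting "params" runs the test suites against every leaf bdev.
 */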
static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	struct spdk_bdev *bdev;
	int rc;

	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		bdev = spdk_bdev_get_by_name(req.name);
		if (bdev == NULL) {
			SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Bdev '%s' does not exist: %s",
							     req.name, spdk_strerror(ENODEV));
			goto invalid;
		}
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     spdk_bdev_get_name(bdev), spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);

	return;

invalid:
	free_rpc_perform_tests(&req);
}
SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)

static void
spdk_bdevio_shutdown_cb(void)
{
	g_shutdown = true;
	spdk_thread_send_msg(g_thread_init, __stop_init_thread, NULL);
}

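/*
 * reactor_mask "0x7" requests cores 0-2, matching the three-core minimum
 * enforced by test_main() (init, ut, and io threads on separate cores).
 */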
int
main(int argc, char **argv)
{
	int			rc;
	struct spdk_app_opts	opts = {};

	spdk_app_opts_init(&opts, sizeof(opts));
	opts.name = "bdevio";
	opts.reactor_mask = "0x7";
	opts.shutdown_cb = spdk_bdevio_shutdown_cb;

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
				      bdevio_parse_arg, bdevio_usage)) !=
	    SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	rc = spdk_app_start(&opts, test_main, NULL);
	spdk_app_fini();

	return rc;
}