xref: /spdk/test/bdev/bdevio/bdevio.c (revision 6e8e184bca3b27096663dcc22f4b8b179a46a82b)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk/bdev.h"
37 #include "spdk/accel_engine.h"
38 #include "spdk/env.h"
39 #include "spdk/log.h"
40 #include "spdk/thread.h"
41 #include "spdk/event.h"
42 #include "spdk/rpc.h"
43 #include "spdk/util.h"
44 #include "spdk/string.h"
45 
46 #include "CUnit/Basic.h"
47 
#define BUFFER_IOVS		1024
/* Parenthesized so the macro is safe inside any expression it is expanded in. */
#define BUFFER_SIZE		(260 * 1024)
#define BDEV_TASK_ARRAY_SIZE	2048

/* Mutex/cond pair used to block the CUnit thread until the io thread signals. */
pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

/* The three SPDK threads: app init thread, CUnit runner thread, I/O submission thread. */
static struct spdk_thread *g_thread_init;
static struct spdk_thread *g_thread_ut;
static struct spdk_thread *g_thread_io;
static bool g_wait_for_tests = false;	/* set when tests are deferred until an RPC arrives */
static int g_num_failures = 0;		/* CUnit failure count carried to spdk_app_stop() */
static bool g_shutdown = false;		/* set on app shutdown to stop waiting for RPCs */
61 
/* One leaf bdev under test: open descriptor, its I/O channel, and list linkage. */
struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;	/* channel obtained on the io thread */
	struct io_target	*next;	/* next target in the g_io_targets list */
};
68 
/* Parameters of a single test I/O, handed from the ut thread to the io thread. */
struct bdevio_request {
	char *buf;			/* data buffer (cmp buffer for fused compare-and-write) */
	char *fused_buf;		/* write buffer for fused compare-and-write */
	int data_len;			/* total I/O length in bytes */
	uint64_t offset;		/* byte offset into the bdev */
	struct iovec iov[BUFFER_IOVS];	/* populated when buf is chopped into an SGL */
	int iovcnt;			/* 0 means use the contiguous buf instead */
	struct iovec fused_iov[BUFFER_IOVS];
	int fused_iovcnt;
	struct io_target *target;
};
80 
struct io_target *g_io_targets = NULL;		/* head of the list of all leaf bdevs under test */
struct io_target *g_current_io_target = NULL;	/* target the currently-running suite operates on */
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);
84 
85 static void
86 execute_spdk_function(spdk_msg_fn fn, void *arg)
87 {
88 	pthread_mutex_lock(&g_test_mutex);
89 	spdk_thread_send_msg(g_thread_io, fn, arg);
90 	pthread_cond_wait(&g_test_cond, &g_test_mutex);
91 	pthread_mutex_unlock(&g_test_mutex);
92 }
93 
94 static void
95 wake_ut_thread(void)
96 {
97 	pthread_mutex_lock(&g_test_mutex);
98 	pthread_cond_signal(&g_test_cond);
99 	pthread_mutex_unlock(&g_test_mutex);
100 }
101 
/* io-thread message: acquire the target's I/O channel, then wake the ut thread. */
static void
__get_io_channel(void *arg)
{
	struct io_target *target = arg;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}
111 
/* No-op bdev event callback: bdevio does not react to bdev events (e.g. removal). */
static void
bdevio_construct_target_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				void *event_ctx)
{
}
117 
118 static int
119 bdevio_construct_target(struct spdk_bdev *bdev)
120 {
121 	struct io_target *target;
122 	int rc;
123 	uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
124 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
125 
126 	target = malloc(sizeof(struct io_target));
127 	if (target == NULL) {
128 		return -ENOMEM;
129 	}
130 
131 	rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), true, bdevio_construct_target_open_cb, NULL,
132 				&target->bdev_desc);
133 	if (rc != 0) {
134 		free(target);
135 		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
136 		return rc;
137 	}
138 
139 	printf("  %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
140 	       spdk_bdev_get_name(bdev),
141 	       num_blocks, block_size,
142 	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));
143 
144 	target->bdev = bdev;
145 	target->next = g_io_targets;
146 	execute_spdk_function(__get_io_channel, target);
147 	g_io_targets = target;
148 
149 	return 0;
150 }
151 
152 static int
153 bdevio_construct_targets(void)
154 {
155 	struct spdk_bdev *bdev;
156 	int rc;
157 
158 	printf("I/O targets:\n");
159 
160 	bdev = spdk_bdev_first_leaf();
161 	while (bdev != NULL) {
162 		rc = bdevio_construct_target(bdev);
163 		if (rc < 0) {
164 			SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
165 			return rc;
166 		}
167 		bdev = spdk_bdev_next_leaf(bdev);
168 	}
169 
170 	if (g_io_targets == NULL) {
171 		SPDK_ERRLOG("No bdevs to perform tests on\n");
172 		return -1;
173 	}
174 
175 	return 0;
176 }
177 
/* io-thread message: release the target's I/O channel, then wake the ut thread. */
static void
__put_io_channel(void *arg)
{
	struct io_target *target = arg;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}
186 
187 static void
188 bdevio_cleanup_targets(void)
189 {
190 	struct io_target *target;
191 
192 	target = g_io_targets;
193 	while (target != NULL) {
194 		execute_spdk_function(__put_io_channel, target);
195 		spdk_bdev_close(target->bdev_desc);
196 		g_io_targets = target->next;
197 		free(target);
198 		target = g_io_targets;
199 	}
200 }
201 
202 static bool g_completion_success;
203 
204 static void
205 initialize_buffer(char **buf, int pattern, int size)
206 {
207 	*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
208 	memset(*buf, pattern, size);
209 }
210 
/* Generic I/O completion callback: record success/failure and wake the ut thread. */
static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}
218 
/*
 * io-thread message: submit the write described by the request — vectored when
 * the buffer was chopped into iovs, contiguous otherwise.  On successful
 * submission quick_test_complete() wakes the ut thread later; on submission
 * failure no completion will arrive, so wake it here with failure recorded.
 */
static void
__blockdev_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
239 
/*
 * io-thread message: submit a write-zeroes for the request's range.  On
 * submission failure wake the ut thread directly with failure recorded.
 */
static void
__blockdev_write_zeroes(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
254 
/*
 * io-thread message: submit a fused compare-and-write.  iov holds the compare
 * data, fused_iov the write data; req->data_len here is in blocks (the
 * *_blocks API variant).  Wake the ut thread on submission failure.
 */
static void
__blockdev_compare_and_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt,
			req->fused_iov, req->fused_iovcnt, req->offset, req->data_len, quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
270 
271 static void
272 sgl_chop_buffer(struct bdevio_request *req, int iov_len)
273 {
274 	int data_len = req->data_len;
275 	char *buf = req->buf;
276 
277 	req->iovcnt = 0;
278 	if (!iov_len) {
279 		return;
280 	}
281 
282 	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
283 		if (data_len < iov_len) {
284 			iov_len = data_len;
285 		}
286 
287 		req->iov[req->iovcnt].iov_base = buf;
288 		req->iov[req->iovcnt].iov_len = iov_len;
289 
290 		buf += iov_len;
291 		data_len -= iov_len;
292 	}
293 
294 	CU_ASSERT_EQUAL_FATAL(data_len, 0);
295 }
296 
297 static void
298 sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len)
299 {
300 	int data_len = req->data_len;
301 	char *buf = req->fused_buf;
302 
303 	req->fused_iovcnt = 0;
304 	if (!iov_len) {
305 		return;
306 	}
307 
308 	for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) {
309 		if (data_len < iov_len) {
310 			iov_len = data_len;
311 		}
312 
313 		req->fused_iov[req->fused_iovcnt].iov_base = buf;
314 		req->fused_iov[req->fused_iovcnt].iov_len = iov_len;
315 
316 		buf += iov_len;
317 		data_len -= iov_len;
318 	}
319 
320 	CU_ASSERT_EQUAL_FATAL(data_len, 0);
321 }
322 
323 static void
324 blockdev_write(struct io_target *target, char *tx_buf,
325 	       uint64_t offset, int data_len, int iov_len)
326 {
327 	struct bdevio_request req;
328 
329 	req.target = target;
330 	req.buf = tx_buf;
331 	req.data_len = data_len;
332 	req.offset = offset;
333 	sgl_chop_buffer(&req, iov_len);
334 
335 	g_completion_success = false;
336 
337 	execute_spdk_function(__blockdev_write, &req);
338 }
339 
340 static void
341 _blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf,
342 			    uint64_t offset, int data_len, int iov_len)
343 {
344 	struct bdevio_request req;
345 
346 	req.target = target;
347 	req.buf = cmp_buf;
348 	req.fused_buf = write_buf;
349 	req.data_len = data_len;
350 	req.offset = offset;
351 	sgl_chop_buffer(&req, iov_len);
352 	sgl_chop_fused_buffer(&req, iov_len);
353 
354 	g_completion_success = false;
355 
356 	execute_spdk_function(__blockdev_compare_and_write, &req);
357 }
358 
359 static void
360 blockdev_write_zeroes(struct io_target *target, char *tx_buf,
361 		      uint64_t offset, int data_len)
362 {
363 	struct bdevio_request req;
364 
365 	req.target = target;
366 	req.buf = tx_buf;
367 	req.data_len = data_len;
368 	req.offset = offset;
369 
370 	g_completion_success = false;
371 
372 	execute_spdk_function(__blockdev_write_zeroes, &req);
373 }
374 
/*
 * io-thread message: submit the read described by the request — vectored when
 * iovcnt is set, contiguous otherwise.  On submission failure no completion
 * will arrive, so wake the ut thread here with failure recorded.
 */
static void
__blockdev_read(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
395 
396 static void
397 blockdev_read(struct io_target *target, char *rx_buf,
398 	      uint64_t offset, int data_len, int iov_len)
399 {
400 	struct bdevio_request req;
401 
402 	req.target = target;
403 	req.buf = rx_buf;
404 	req.data_len = data_len;
405 	req.offset = offset;
406 	req.iovcnt = 0;
407 	sgl_chop_buffer(&req, iov_len);
408 
409 	g_completion_success = false;
410 
411 	execute_spdk_function(__blockdev_read, &req);
412 }
413 
/*
 * Compare rx_buf and tx_buf over data_length bytes and free BOTH buffers —
 * ownership transfers to this function.  Returns the memcmp() result.
 */
static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int match = memcmp(rx_buf, tx_buf, data_length);

	spdk_free(rx_buf);
	spdk_free(tx_buf);

	return match;
}
425 
/*
 * An I/O length is valid only if it is at least one block, block-aligned, and
 * no larger than the whole bdev.
 */
static bool
blockdev_io_valid_blocks(struct spdk_bdev *bdev, uint64_t data_length)
{
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	return data_length >= block_size &&
	       data_length % block_size == 0 &&
	       data_length / block_size <= spdk_bdev_get_num_blocks(bdev);
}
437 
438 static void
439 blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
440 		    int expected_rc, bool write_zeroes)
441 {
442 	struct io_target *target;
443 	char	*tx_buf = NULL;
444 	char	*rx_buf = NULL;
445 	int	rc;
446 
447 	target = g_current_io_target;
448 
449 	if (!blockdev_io_valid_blocks(target->bdev, data_length)) {
450 		return;
451 	}
452 
453 	if (!write_zeroes) {
454 		initialize_buffer(&tx_buf, pattern, data_length);
455 		initialize_buffer(&rx_buf, 0, data_length);
456 
457 		blockdev_write(target, tx_buf, offset, data_length, iov_len);
458 	} else {
459 		initialize_buffer(&tx_buf, 0, data_length);
460 		initialize_buffer(&rx_buf, pattern, data_length);
461 
462 		blockdev_write_zeroes(target, tx_buf, offset, data_length);
463 	}
464 
465 
466 	if (expected_rc == 0) {
467 		CU_ASSERT_EQUAL(g_completion_success, true);
468 	} else {
469 		CU_ASSERT_EQUAL(g_completion_success, false);
470 	}
471 	blockdev_read(target, rx_buf, offset, data_length, iov_len);
472 
473 	if (expected_rc == 0) {
474 		CU_ASSERT_EQUAL(g_completion_success, true);
475 	} else {
476 		CU_ASSERT_EQUAL(g_completion_success, false);
477 	}
478 
479 	if (g_completion_success) {
480 		rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
481 		/* Assert the write by comparing it with values read
482 		 * from each blockdev */
483 		CU_ASSERT_EQUAL(rc, 0);
484 	}
485 }
486 
487 static void
488 blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset)
489 {
490 	struct io_target *target;
491 	char	*tx_buf = NULL;
492 	char	*write_buf = NULL;
493 	char	*rx_buf = NULL;
494 	int	rc;
495 
496 	target = g_current_io_target;
497 
498 	if (!blockdev_io_valid_blocks(target->bdev, data_length)) {
499 		return;
500 	}
501 
502 	initialize_buffer(&tx_buf, 0xAA, data_length);
503 	initialize_buffer(&rx_buf, 0, data_length);
504 	initialize_buffer(&write_buf, 0xBB, data_length);
505 
506 	blockdev_write(target, tx_buf, offset, data_length, iov_len);
507 	CU_ASSERT_EQUAL(g_completion_success, true);
508 
509 	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
510 	CU_ASSERT_EQUAL(g_completion_success, true);
511 
512 	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
513 	CU_ASSERT_EQUAL(g_completion_success, false);
514 
515 	blockdev_read(target, rx_buf, offset, data_length, iov_len);
516 	CU_ASSERT_EQUAL(g_completion_success, true);
517 	rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length);
518 	/* Assert the write by comparing it with values read
519 	 * from each blockdev */
520 	CU_ASSERT_EQUAL(rc, 0);
521 }
522 
523 static void
524 blockdev_write_read_4k(void)
525 {
526 	uint32_t data_length;
527 	uint64_t offset;
528 	int pattern;
529 	int expected_rc;
530 
531 	/* Data size = 4K */
532 	data_length = 4096;
533 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
534 	offset = 0;
535 	pattern = 0xA3;
536 	/* Params are valid, hence the expected return value
537 	 * of write and read for all blockdevs is 0. */
538 	expected_rc = 0;
539 
540 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
541 }
542 
static void
blockdev_write_zeroes_read_4k(void)
{
	/* Valid 4K write_zeroes at offset 0 — write_zeroes and read must succeed (rc 0). */
	blockdev_write_read(4096, 0, 0xA3, 0, 0, 1);
}
561 
/*
 * This i/o will not have to split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_1m(void)
{
	/* Valid 1M write_zeroes at offset 0 — expected rc 0 for all bdevs. */
	blockdev_write_read(1048576, 0, 0xA3, 0, 0, 1);
}
583 
/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_3m(void)
{
	/* Valid 3M write_zeroes at offset 0 — expected rc 0 for all bdevs. */
	blockdev_write_read(3145728, 0, 0xA3, 0, 0, 1);
}
606 
/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev. It also
 * tests a write size that is not an even multiple of
 * the bdev layer zero buffer size.
 */
static void
blockdev_write_zeroes_read_3m_500k(void)
{
	/* Valid 3.5M write_zeroes at offset 0 — expected rc 0 for all bdevs. */
	blockdev_write_read(3670016, 0, 0xA3, 0, 0, 1);
}
631 
632 static void
633 blockdev_writev_readv_4k(void)
634 {
635 	uint32_t data_length, iov_len;
636 	uint64_t offset;
637 	int pattern;
638 	int expected_rc;
639 
640 	/* Data size = 4K */
641 	data_length = 4096;
642 	iov_len = 4096;
643 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
644 	offset = 0;
645 	pattern = 0xA3;
646 	/* Params are valid, hence the expected return value
647 	 * of write and read for all blockdevs is 0. */
648 	expected_rc = 0;
649 
650 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
651 }
652 
653 static void
654 blockdev_comparev_and_writev(void)
655 {
656 	uint32_t data_length, iov_len;
657 	uint64_t offset;
658 
659 	data_length = 1;
660 	iov_len = 1;
661 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
662 	offset = 0;
663 
664 	blockdev_compare_and_write(data_length, iov_len, offset);
665 }
666 
667 static void
668 blockdev_writev_readv_30x4k(void)
669 {
670 	uint32_t data_length, iov_len;
671 	uint64_t offset;
672 	int pattern;
673 	int expected_rc;
674 
675 	/* Data size = 4K */
676 	data_length = 4096 * 30;
677 	iov_len = 4096;
678 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
679 	offset = 0;
680 	pattern = 0xA3;
681 	/* Params are valid, hence the expected return value
682 	 * of write and read for all blockdevs is 0. */
683 	expected_rc = 0;
684 
685 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
686 }
687 
688 static void
689 blockdev_write_read_512Bytes(void)
690 {
691 	uint32_t data_length;
692 	uint64_t offset;
693 	int pattern;
694 	int expected_rc;
695 
696 	/* Data size = 512 */
697 	data_length = 512;
698 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
699 	offset = 8192;
700 	pattern = 0xA3;
701 	/* Params are valid, hence the expected return value
702 	 * of write and read for all blockdevs is 0. */
703 	expected_rc = 0;
704 
705 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
706 }
707 
708 static void
709 blockdev_writev_readv_512Bytes(void)
710 {
711 	uint32_t data_length, iov_len;
712 	uint64_t offset;
713 	int pattern;
714 	int expected_rc;
715 
716 	/* Data size = 512 */
717 	data_length = 512;
718 	iov_len = 512;
719 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
720 	offset = 8192;
721 	pattern = 0xA3;
722 	/* Params are valid, hence the expected return value
723 	 * of write and read for all blockdevs is 0. */
724 	expected_rc = 0;
725 
726 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
727 }
728 
729 static void
730 blockdev_write_read_size_gt_128k(void)
731 {
732 	uint32_t data_length;
733 	uint64_t offset;
734 	int pattern;
735 	int expected_rc;
736 
737 	/* Data size = 132K */
738 	data_length = 135168;
739 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
740 	offset = 8192;
741 	pattern = 0xA3;
742 	/* Params are valid, hence the expected return value
743 	 * of write and read for all blockdevs is 0. */
744 	expected_rc = 0;
745 
746 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
747 }
748 
749 static void
750 blockdev_writev_readv_size_gt_128k(void)
751 {
752 	uint32_t data_length, iov_len;
753 	uint64_t offset;
754 	int pattern;
755 	int expected_rc;
756 
757 	/* Data size = 132K */
758 	data_length = 135168;
759 	iov_len = 135168;
760 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
761 	offset = 8192;
762 	pattern = 0xA3;
763 	/* Params are valid, hence the expected return value
764 	 * of write and read for all blockdevs is 0. */
765 	expected_rc = 0;
766 
767 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
768 }
769 
770 static void
771 blockdev_writev_readv_size_gt_128k_two_iov(void)
772 {
773 	uint32_t data_length, iov_len;
774 	uint64_t offset;
775 	int pattern;
776 	int expected_rc;
777 
778 	/* Data size = 132K */
779 	data_length = 135168;
780 	iov_len = 128 * 1024;
781 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
782 	offset = 8192;
783 	pattern = 0xA3;
784 	/* Params are valid, hence the expected return value
785 	 * of write and read for all blockdevs is 0. */
786 	expected_rc = 0;
787 
788 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
789 }
790 
791 static void
792 blockdev_write_read_invalid_size(void)
793 {
794 	uint32_t data_length;
795 	uint64_t offset;
796 	int pattern;
797 	int expected_rc;
798 
799 	/* Data size is not a multiple of the block size */
800 	data_length = 0x1015;
801 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
802 	offset = 8192;
803 	pattern = 0xA3;
804 	/* Params are invalid, hence the expected return value
805 	 * of write and read for all blockdevs is < 0 */
806 	expected_rc = -1;
807 
808 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
809 }
810 
811 static void
812 blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
813 {
814 	struct io_target *target;
815 	struct spdk_bdev *bdev;
816 	char	*tx_buf = NULL;
817 	char	*rx_buf = NULL;
818 	uint64_t offset;
819 	uint32_t block_size;
820 	int rc;
821 
822 	target = g_current_io_target;
823 	bdev = target->bdev;
824 
825 	block_size = spdk_bdev_get_block_size(bdev);
826 
827 	/* The start offset has been set to a marginal value
828 	 * such that offset + nbytes == Total size of
829 	 * blockdev. */
830 	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);
831 
832 	initialize_buffer(&tx_buf, 0xA3, block_size);
833 	initialize_buffer(&rx_buf, 0, block_size);
834 
835 	blockdev_write(target, tx_buf, offset, block_size, 0);
836 	CU_ASSERT_EQUAL(g_completion_success, true);
837 
838 	blockdev_read(target, rx_buf, offset, block_size, 0);
839 	CU_ASSERT_EQUAL(g_completion_success, true);
840 
841 	rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
842 	/* Assert the write by comparing it with values read
843 	 * from each blockdev */
844 	CU_ASSERT_EQUAL(rc, 0);
845 }
846 
847 static void
848 blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
849 {
850 	struct io_target *target;
851 	struct spdk_bdev *bdev;
852 	char	*tx_buf = NULL;
853 	char	*rx_buf = NULL;
854 	int	data_length;
855 	uint64_t offset;
856 	int pattern;
857 
858 	/* Tests the overflow condition of the blockdevs. */
859 	data_length = 4096;
860 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
861 	pattern = 0xA3;
862 
863 	target = g_current_io_target;
864 	bdev = target->bdev;
865 
866 	/* The start offset has been set to a valid value
867 	 * but offset + nbytes is greater than the Total size
868 	 * of the blockdev. The test should fail. */
869 	offset = ((spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev)) - 1024);
870 
871 	initialize_buffer(&tx_buf, pattern, data_length);
872 	initialize_buffer(&rx_buf, 0, data_length);
873 
874 	blockdev_write(target, tx_buf, offset, data_length, 0);
875 	CU_ASSERT_EQUAL(g_completion_success, false);
876 
877 	blockdev_read(target, rx_buf, offset, data_length, 0);
878 	CU_ASSERT_EQUAL(g_completion_success, false);
879 }
880 
881 static void
882 blockdev_write_read_max_offset(void)
883 {
884 	int	data_length;
885 	uint64_t offset;
886 	int pattern;
887 	int expected_rc;
888 
889 	data_length = 4096;
890 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
891 	/* The start offset has been set to UINT64_MAX such that
892 	 * adding nbytes wraps around and points to an invalid address. */
893 	offset = UINT64_MAX;
894 	pattern = 0xA3;
895 	/* Params are invalid, hence the expected return value
896 	 * of write and read for all blockdevs is < 0 */
897 	expected_rc = -1;
898 
899 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
900 }
901 
/*
 * Write 8K of 0xA3 at offset 0, verify, then write 8K of 0xBB at offset 4096
 * (overlapping the first write by 4K) and verify the overlapped range reads
 * back as the new pattern.
 */
static void
blockdev_overlapped_write_read_8k(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 8K */
	data_length = 8192;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);

	/* Overwrite the pattern 0xbb of size 8K on an address offset overlapping
	 * with the address written above and assert the new value in
	 * the overlapped address range */
	/* Populate 8k with value 0xBB */
	pattern = 0xBB;
	/* Offset = 4096; overlaps the first 8K write by 4K */
	offset = 4096;
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}
933 
/*
 * io-thread message: submit a bdev reset.  On submission failure no completion
 * will arrive, so wake the ut thread here with failure recorded.
 */
static void
__blockdev_reset(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
947 
948 static void
949 blockdev_test_reset(void)
950 {
951 	struct bdevio_request req;
952 	struct io_target *target;
953 
954 	target = g_current_io_target;
955 	req.target = target;
956 
957 	g_completion_success = false;
958 
959 	execute_spdk_function(__blockdev_reset, &req);
960 
961 	CU_ASSERT_EQUAL(g_completion_success, true);
962 }
963 
/* An NVMe passthrough command plus the completion status it came back with. */
struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;	/* raw NVMe command to submit */
	void *buf;			/* data buffer (DMA-able), may be NULL */
	uint32_t len;			/* buffer length in bytes */
	struct io_target *target;
	int sct;			/* completion: status code type */
	int sc;				/* completion: status code */
	uint32_t cdw0;			/* completion: dword 0 */
};
973 
/* Passthrough completion: capture NVMe status (cdw0/sct/sc) into the request
 * and wake the ut thread. */
static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}
983 
/*
 * io-thread message: submit an NVMe I/O passthrough command.  On submission
 * failure just wake the ut thread — pt_req keeps whatever sct/sc values the
 * caller seeded, which the test asserts against.
 */
static void
__blockdev_nvme_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
					&pt_req->cmd, pt_req->buf, pt_req->len,
					nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}
998 
999 static void
1000 blockdev_test_nvme_passthru_rw(void)
1001 {
1002 	struct bdevio_passthrough_request pt_req;
1003 	void *write_buf, *read_buf;
1004 	struct io_target *target;
1005 
1006 	target = g_current_io_target;
1007 
1008 	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
1009 		return;
1010 	}
1011 
1012 	memset(&pt_req, 0, sizeof(pt_req));
1013 	pt_req.target = target;
1014 	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
1015 	pt_req.cmd.nsid = 1;
1016 	*(uint64_t *)&pt_req.cmd.cdw10 = 4;
1017 	pt_req.cmd.cdw12 = 0;
1018 
1019 	pt_req.len = spdk_bdev_get_block_size(target->bdev);
1020 	write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1021 	memset(write_buf, 0xA5, pt_req.len);
1022 	pt_req.buf = write_buf;
1023 
1024 	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
1025 	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
1026 	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
1027 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1028 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
1029 
1030 	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
1031 	read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1032 	pt_req.buf = read_buf;
1033 
1034 	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
1035 	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
1036 	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
1037 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1038 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
1039 
1040 	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
1041 	spdk_free(read_buf);
1042 	spdk_free(write_buf);
1043 }
1044 
1045 static void
1046 blockdev_test_nvme_passthru_vendor_specific(void)
1047 {
1048 	struct bdevio_passthrough_request pt_req;
1049 	struct io_target *target;
1050 
1051 	target = g_current_io_target;
1052 
1053 	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
1054 		return;
1055 	}
1056 
1057 	memset(&pt_req, 0, sizeof(pt_req));
1058 	pt_req.target = target;
1059 	pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
1060 	pt_req.cmd.nsid = 1;
1061 
1062 	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
1063 	pt_req.sc = SPDK_NVME_SC_SUCCESS;
1064 	pt_req.cdw0 = 0xbeef;
1065 	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
1066 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1067 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
1068 	CU_ASSERT(pt_req.cdw0 == 0x0);
1069 }
1070 
/*
 * io-thread message: submit an NVMe admin passthrough command.  On submission
 * failure just wake the ut thread — pt_req keeps the caller-seeded sct/sc.
 */
static void
__blockdev_nvme_admin_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
					   &pt_req->cmd, pt_req->buf, pt_req->len,
					   nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}
1085 
1086 static void
1087 blockdev_test_nvme_admin_passthru(void)
1088 {
1089 	struct io_target *target;
1090 	struct bdevio_passthrough_request pt_req;
1091 
1092 	target = g_current_io_target;
1093 
1094 	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
1095 		return;
1096 	}
1097 
1098 	memset(&pt_req, 0, sizeof(pt_req));
1099 	pt_req.target = target;
1100 	pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
1101 	pt_req.cmd.nsid = 0;
1102 	*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;
1103 
1104 	pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
1105 	pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1106 
1107 	pt_req.sct = SPDK_NVME_SCT_GENERIC;
1108 	pt_req.sc = SPDK_NVME_SC_SUCCESS;
1109 	execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
1110 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1111 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
1112 }
1113 
/*
 * Runs on the init thread after a test pass: tear down all targets, then
 * either report results over RPC and keep running (wait-for-tests mode) or
 * stop the app with the failure count as exit status.
 */
static void
__stop_init_thread(void *arg)
{
	unsigned num_failures = g_num_failures;
	struct spdk_jsonrpc_request *request = arg;

	g_num_failures = 0;

	bdevio_cleanup_targets();
	if (g_wait_for_tests && !g_shutdown) {
		/* Do not stop the app yet, wait for another RPC */
		rpc_perform_tests_cb(num_failures, request);
		return;
	}
	spdk_app_stop(num_failures);
}
1130 
/* Record the number of test failures, then hand final teardown to the init
 * thread (which owns target cleanup and app shutdown). */
static void
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	g_num_failures = num_failures;

	spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
}
1138 
1139 static int
1140 suite_init(void)
1141 {
1142 	if (g_current_io_target == NULL) {
1143 		g_current_io_target = g_io_targets;
1144 	}
1145 	return 0;
1146 }
1147 
/* CUnit per-suite teardown: advance the cursor so the next suite (one suite
 * is registered per target) operates on the next bdev in the list. */
static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}
1154 
1155 #define SUITE_NAME_MAX 64
1156 
1157 static int
1158 __setup_ut_on_single_target(struct io_target *target)
1159 {
1160 	unsigned rc = 0;
1161 	CU_pSuite suite = NULL;
1162 	char name[SUITE_NAME_MAX];
1163 
1164 	snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
1165 	suite = CU_add_suite(name, suite_init, suite_fini);
1166 	if (suite == NULL) {
1167 		CU_cleanup_registry();
1168 		rc = CU_get_error();
1169 		return -rc;
1170 	}
1171 
1172 	if (
1173 		CU_add_test(suite, "blockdev write read 4k", blockdev_write_read_4k) == NULL
1174 		|| CU_add_test(suite, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k) == NULL
1175 		|| CU_add_test(suite, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m) == NULL
1176 		|| CU_add_test(suite, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m) == NULL
1177 		|| CU_add_test(suite, "blockdev write zeroes read 3.5m", blockdev_write_zeroes_read_3m_500k) == NULL
1178 		|| CU_add_test(suite, "blockdev reset",
1179 			       blockdev_test_reset) == NULL
1180 		|| CU_add_test(suite, "blockdev write read 512 bytes",
1181 			       blockdev_write_read_512Bytes) == NULL
1182 		|| CU_add_test(suite, "blockdev write read size > 128k",
1183 			       blockdev_write_read_size_gt_128k) == NULL
1184 		|| CU_add_test(suite, "blockdev write read invalid size",
1185 			       blockdev_write_read_invalid_size) == NULL
1186 		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
1187 			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
1188 		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
1189 			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
1190 		|| CU_add_test(suite, "blockdev write read max offset",
1191 			       blockdev_write_read_max_offset) == NULL
1192 		|| CU_add_test(suite, "blockdev write read 8k on overlapped address offset",
1193 			       blockdev_overlapped_write_read_8k) == NULL
1194 		|| CU_add_test(suite, "blockdev writev readv 4k", blockdev_writev_readv_4k) == NULL
1195 		|| CU_add_test(suite, "blockdev writev readv 30 x 4k",
1196 			       blockdev_writev_readv_30x4k) == NULL
1197 		|| CU_add_test(suite, "blockdev writev readv 512 bytes",
1198 			       blockdev_writev_readv_512Bytes) == NULL
1199 		|| CU_add_test(suite, "blockdev writev readv size > 128k",
1200 			       blockdev_writev_readv_size_gt_128k) == NULL
1201 		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
1202 			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
1203 		|| CU_add_test(suite, "blockdev comparev and writev", blockdev_comparev_and_writev) == NULL
1204 		|| CU_add_test(suite, "blockdev nvme passthru rw",
1205 			       blockdev_test_nvme_passthru_rw) == NULL
1206 		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
1207 			       blockdev_test_nvme_passthru_vendor_specific) == NULL
1208 		|| CU_add_test(suite, "blockdev nvme admin passthru",
1209 			       blockdev_test_nvme_admin_passthru) == NULL
1210 	) {
1211 		CU_cleanup_registry();
1212 		rc = CU_get_error();
1213 		return -rc;
1214 	}
1215 	return 0;
1216 }
1217 
1218 static void
1219 __run_ut_thread(void *arg)
1220 {
1221 	struct spdk_jsonrpc_request *request = arg;
1222 	int rc = 0;
1223 	struct io_target *target;
1224 	unsigned num_failures;
1225 
1226 	if (CU_initialize_registry() != CUE_SUCCESS) {
1227 		/* CUnit error, probably won't recover */
1228 		rc = CU_get_error();
1229 		stop_init_thread(-rc, request);
1230 	}
1231 
1232 	target = g_io_targets;
1233 	while (target != NULL) {
1234 		rc = __setup_ut_on_single_target(target);
1235 		if (rc < 0) {
1236 			/* CUnit error, probably won't recover */
1237 			stop_init_thread(-rc, request);
1238 		}
1239 		target = target->next;
1240 	}
1241 	CU_basic_set_mode(CU_BRM_VERBOSE);
1242 	CU_basic_run_tests();
1243 	num_failures = CU_get_number_of_failures();
1244 	CU_cleanup_registry();
1245 
1246 	stop_init_thread(num_failures, request);
1247 }
1248 
1249 static void
1250 __construct_targets(void *arg)
1251 {
1252 	if (bdevio_construct_targets() < 0) {
1253 		spdk_app_stop(-1);
1254 		return;
1255 	}
1256 
1257 	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
1258 }
1259 
1260 static void
1261 test_main(void *arg1)
1262 {
1263 	struct spdk_cpuset tmpmask = {};
1264 	uint32_t i;
1265 
1266 	pthread_mutex_init(&g_test_mutex, NULL);
1267 	pthread_cond_init(&g_test_cond, NULL);
1268 
1269 	/* This test runs specifically on at least three cores.
1270 	 * g_thread_init is the app_thread on main core from event framework.
1271 	 * Next two are only for the tests and should always be on separate CPU cores. */
1272 	if (spdk_env_get_core_count() < 3) {
1273 		spdk_app_stop(-1);
1274 		return;
1275 	}
1276 
1277 	SPDK_ENV_FOREACH_CORE(i) {
1278 		if (i == spdk_env_get_current_core()) {
1279 			g_thread_init = spdk_get_thread();
1280 			continue;
1281 		}
1282 		spdk_cpuset_zero(&tmpmask);
1283 		spdk_cpuset_set_cpu(&tmpmask, i, true);
1284 		if (g_thread_ut == NULL) {
1285 			g_thread_ut = spdk_thread_create("ut_thread", &tmpmask);
1286 		} else if (g_thread_io == NULL) {
1287 			g_thread_io = spdk_thread_create("io_thread", &tmpmask);
1288 		}
1289 
1290 	}
1291 
1292 	if (g_wait_for_tests) {
1293 		/* Do not perform any tests until RPC is received */
1294 		return;
1295 	}
1296 
1297 	spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
1298 }
1299 
/* Print the app-specific option help, appended to SPDK's generic usage. */
static void
bdevio_usage(void)
{
	fputs(" -w                        start bdevio app and wait for RPC to start the tests\n",
	      stdout);
}
1305 
1306 static int
1307 bdevio_parse_arg(int ch, char *arg)
1308 {
1309 	switch (ch) {
1310 	case 'w':
1311 		g_wait_for_tests =  true;
1312 		break;
1313 	default:
1314 		return -EINVAL;
1315 	}
1316 	return 0;
1317 }
1318 
/* Decoded parameters of the "perform_tests" RPC. */
struct rpc_perform_tests {
	char *name;	/* bdev to test; NULL means test every bdev */
};
1322 
/* Release memory owned by a decoded rpc_perform_tests request. */
static void
free_rpc_perform_tests(struct rpc_perform_tests *r)
{
	free(r->name);
}
1328 
/* JSON decoder table for "perform_tests"; "name" is optional (final 'true'). */
static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};
1332 
1333 static void
1334 rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
1335 {
1336 	struct spdk_json_write_ctx *w;
1337 
1338 	if (num_failures == 0) {
1339 		w = spdk_jsonrpc_begin_result(request);
1340 		spdk_json_write_uint32(w, num_failures);
1341 		spdk_jsonrpc_end_result(request, w);
1342 	} else {
1343 		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
1344 						     "%d test cases failed", num_failures);
1345 	}
1346 }
1347 
/*
 * Handler for the "perform_tests" RPC.  With a "name" parameter, a target is
 * built for that single bdev; with no parameters, targets are built for all
 * bdevs.  On success the tests run asynchronously on the UT thread and the
 * RPC is answered from stop_init_thread() when they finish; on any setup
 * error the error response is sent here directly.
 */
static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	struct spdk_bdev *bdev;
	int rc;

	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		/* Single-bdev mode: the named bdev must exist. */
		bdev = spdk_bdev_get_by_name(req.name);
		if (bdev == NULL) {
			SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Bdev '%s' does not exist: %s",
							     req.name, spdk_strerror(ENODEV));
			goto invalid;
		}
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     spdk_bdev_get_name(bdev), spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		/* No name given: build a target for every registered bdev. */
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	/* Kick off the tests on the UT thread; the response is sent later. */
	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);

	return;

invalid:
	/* The error response was already sent above; just free decoded params. */
	free_rpc_perform_tests(&req);
}
SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)
1400 
/* App shutdown hook (e.g. signal): flag shutdown so __stop_init_thread()
 * stops the app even in -w (wait-for-RPC) mode, then trigger teardown on the
 * init thread.  NULL argument: there is no pending RPC request to answer. */
static void
spdk_bdevio_shutdown_cb(void)
{
	g_shutdown = true;
	spdk_thread_send_msg(g_thread_init, __stop_init_thread, NULL);
}
1407 
1408 int
1409 main(int argc, char **argv)
1410 {
1411 	int			rc;
1412 	struct spdk_app_opts	opts = {};
1413 
1414 	spdk_app_opts_init(&opts, sizeof(opts));
1415 	opts.name = "bdevio";
1416 	opts.reactor_mask = "0x7";
1417 	opts.shutdown_cb = spdk_bdevio_shutdown_cb;
1418 
1419 	if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
1420 				      bdevio_parse_arg, bdevio_usage)) !=
1421 	    SPDK_APP_PARSE_ARGS_SUCCESS) {
1422 		return rc;
1423 	}
1424 
1425 	rc = spdk_app_start(&opts, test_main, NULL);
1426 	spdk_app_fini();
1427 
1428 	return rc;
1429 }
1430