/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/accel_engine.h"
#include "spdk/env.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/event.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "CUnit/Basic.h"

#define BUFFER_IOVS		1024
#define BUFFER_SIZE		(260 * 1024)
#define BDEV_TASK_ARRAY_SIZE	2048

pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

static struct spdk_thread *g_thread_init;
static struct spdk_thread *g_thread_ut;
static struct spdk_thread *g_thread_io;
static bool g_wait_for_tests = false;
static int g_num_failures = 0;
static bool g_shutdown = false;

struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;
	struct io_target	*next;
};

struct bdevio_request {
	char *buf;
	char *fused_buf;
	int data_len;
	uint64_t offset;
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;
	struct iovec fused_iov[BUFFER_IOVS];
	int fused_iovcnt;
	struct io_target *target;
};

struct io_target *g_io_targets = NULL;
struct io_target *g_current_io_target = NULL;
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);

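/*
 * Run fn on the io_thread and block the calling (ut) thread until the
 * operation signals completion via wake_ut_thread(). The mutex is held
 * across spdk_thread_send_msg() and pthread_cond_wait(), and is taken
 * again by wake_ut_thread() before signaling, so the wakeup cannot be
 * lost even if fn completes before the caller starts waiting.
 */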
static void
execute_spdk_function(spdk_msg_fn fn, void *arg)
{
	pthread_mutex_lock(&g_test_mutex);
	spdk_thread_send_msg(g_thread_io, fn, arg);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
__get_io_channel(void *arg)
{
	struct io_target *target = arg;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}

static void
bdevio_construct_target_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				void *event_ctx)
{
}

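/*
 * Open a descriptor for the given bdev and fetch an I/O channel for it on
 * the io_thread, then push the target onto the global list used by the
 * test suites.
 */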
static int
bdevio_construct_target(struct spdk_bdev *bdev)
{
	struct io_target *target;
	int rc;
	uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	target = malloc(sizeof(struct io_target));
	if (target == NULL) {
		return -ENOMEM;
	}

	rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), true, bdevio_construct_target_open_cb, NULL,
				&target->bdev_desc);
	if (rc != 0) {
		free(target);
		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
		return rc;
	}

	printf("  %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
	       spdk_bdev_get_name(bdev),
	       num_blocks, block_size,
	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

	target->bdev = bdev;
	target->next = g_io_targets;
	execute_spdk_function(__get_io_channel, target);
	g_io_targets = target;

	return 0;
}

static int
bdevio_construct_targets(void)
{
	struct spdk_bdev *bdev;
	int rc;

	printf("I/O targets:\n");

	bdev = spdk_bdev_first_leaf();
	while (bdev != NULL) {
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
			return rc;
		}
		bdev = spdk_bdev_next_leaf(bdev);
	}

	if (g_io_targets == NULL) {
		SPDK_ERRLOG("No bdevs to perform tests on\n");
		return -1;
	}

	return 0;
}

static void
__put_io_channel(void *arg)
{
	struct io_target *target = arg;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}

static void
bdevio_cleanup_targets(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		execute_spdk_function(__put_io_channel, target);
		spdk_bdev_close(target->bdev_desc);
		g_io_targets = target->next;
		free(target);
		target = g_io_targets;
	}
}

static bool g_completion_success;

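/* Allocate a DMA-safe, 4 KiB-aligned buffer and fill it with the given
 * byte pattern. */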
static void
initialize_buffer(char **buf, int pattern, int size)
{
	*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	CU_ASSERT_PTR_NOT_NULL_FATAL(*buf);
	memset(*buf, pattern, size);
}

static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_write_zeroes(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_compare_and_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	uint32_t block_size = spdk_bdev_get_block_size(target->bdev);
	int rc;

	/* The fused compare-and-write API takes a block-based offset and
	 * length, while the request carries bytes, so convert here. Both
	 * values are block multiples (see blockdev_io_valid_blocks). */
	rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt,
			req->fused_iov, req->fused_iovcnt, req->offset / block_size,
			req->data_len / block_size, quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

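/*
 * Split the request's flat buffer into iov_len-sized iovec entries (up to
 * BUFFER_IOVS of them). An iov_len of 0 leaves iovcnt at 0, which makes
 * the submit paths fall back to the single-buffer read/write APIs.
 * sgl_chop_fused_buffer below does the same for the fused (write) buffer
 * of a compare-and-write request.
 */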
static void
sgl_chop_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->buf;

	req->iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->iov[req->iovcnt].iov_base = buf;
		req->iov[req->iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->fused_buf;

	req->fused_iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->fused_iov[req->fused_iovcnt].iov_base = buf;
		req->fused_iov[req->fused_iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
blockdev_write(struct io_target *target, char *tx_buf,
	       uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_write, &req);
}

static void
_blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf,
			    uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = cmp_buf;
	req.fused_buf = write_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);
	sgl_chop_fused_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_compare_and_write, &req);
}

static void
blockdev_write_zeroes(struct io_target *target, char *tx_buf,
		      uint64_t offset, int data_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;

	g_completion_success = false;

	execute_spdk_function(__blockdev_write_zeroes, &req);
}

static void
__blockdev_read(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_read(struct io_target *target, char *rx_buf,
	      uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = rx_buf;
	req.data_len = data_len;
	req.offset = offset;
	req.iovcnt = 0;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_read, &req);
}

static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int rc;
	rc = memcmp(rx_buf, tx_buf, data_length);

	spdk_free(rx_buf);
	spdk_free(tx_buf);

	return rc;
}

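/*
 * A request length is usable for a given bdev only if it is at least one
 * block, a whole multiple of the block size, and no larger than the bdev
 * itself; tests silently skip bdevs whose geometry cannot fit the chosen
 * length.
 */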
static bool
blockdev_io_valid_blocks(struct spdk_bdev *bdev, uint64_t data_length)
{
	if (data_length < spdk_bdev_get_block_size(bdev) ||
	    data_length % spdk_bdev_get_block_size(bdev) ||
	    data_length / spdk_bdev_get_block_size(bdev) > spdk_bdev_get_num_blocks(bdev)) {
		return false;
	}

	return true;
}

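/*
 * Generic write-then-read verification: write data_length bytes of the
 * given pattern at offset (or issue a write_zeroes of that size), read the
 * range back, and on success compare the two buffers byte for byte.
 * expected_rc selects whether the I/O itself is expected to complete
 * successfully (0) or fail (< 0), e.g. for misaligned or out-of-range
 * parameters.
 */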
static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes)
{
	struct io_target *target;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;

	target = g_current_io_target;

	if (!blockdev_io_valid_blocks(target->bdev, data_length)) {
		return;
	}

	if (!write_zeroes) {
		initialize_buffer(&tx_buf, pattern, data_length);
		initialize_buffer(&rx_buf, 0, data_length);

		blockdev_write(target, tx_buf, offset, data_length, iov_len);
	} else {
		initialize_buffer(&tx_buf, 0, data_length);
		initialize_buffer(&rx_buf, pattern, data_length);

		blockdev_write_zeroes(target, tx_buf, offset, data_length);
	}

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}
	blockdev_read(target, rx_buf, offset, data_length, iov_len);

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	if (g_completion_success) {
		rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
		/* Verify the write by comparing it with the values read
		 * back from the blockdev */
		CU_ASSERT_EQUAL(rc, 0);
	} else {
		spdk_free(rx_buf);
		spdk_free(tx_buf);
	}
}

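/*
 * Fused compare-and-write semantics: seed the range with 0xAA, then issue
 * a compare (expecting 0xAA) fused with a write of 0xBB. The first fused
 * request matches and commits 0xBB; an identical second request must fail
 * with a miscompare because the media no longer holds 0xAA. A final read
 * confirms 0xBB landed.
 */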
static void
blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset)
{
	struct io_target *target;
	char	*tx_buf = NULL;
	char	*write_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;

	target = g_current_io_target;

	if (!blockdev_io_valid_blocks(target->bdev, data_length)) {
		return;
	}

	initialize_buffer(&tx_buf, 0xAA, data_length);
	initialize_buffer(&rx_buf, 0, data_length);
	initialize_buffer(&write_buf, 0xBB, data_length);

	blockdev_write(target, tx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);
	rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length);
	/* Verify the write by comparing it with the values read
	 * back from the blockdev */
	CU_ASSERT_EQUAL(rc, 0);

	spdk_free(tx_buf);
}

static void
blockdev_write_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_zeroes_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will not have to split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_1m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 1M */
	data_length = 1048576;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_3m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3M */
	data_length = 3145728;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev. It also
 * tests a write size that is not an even multiple of
 * the bdev layer zero buffer size.
 */
static void
blockdev_write_zeroes_read_3m_500k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3.5M */
	data_length = 3670016;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

static void
blockdev_writev_readv_4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_comparev_and_writev(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;

	/* Data size = 1 block; compare-and-write must not exceed the bdev's
	 * atomic compare & write unit, which is at least one block. */
	data_length = spdk_bdev_get_block_size(g_current_io_target->bdev);
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;

	blockdev_compare_and_write(data_length, iov_len, offset);
}

static void
blockdev_writev_readv_30x4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 30 x 4K */
	data_length = 4096 * 30;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_512Bytes(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_512Bytes(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	iov_len = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_size_gt_128k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k_two_iov(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 128 * 1024;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_invalid_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size is not a multiple of the block size */
	data_length = 0x1015;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	uint64_t offset;
	uint32_t block_size;
	int rc;

	target = g_current_io_target;
	bdev = target->bdev;

	block_size = spdk_bdev_get_block_size(bdev);

	/* The start offset has been set to a marginal value
	 * such that offset + nbytes == Total size of
	 * blockdev. */
	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);

	initialize_buffer(&tx_buf, 0xA3, block_size);
	initialize_buffer(&rx_buf, 0, block_size);

	blockdev_write(target, tx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	blockdev_read(target, rx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
	/* Verify the write by comparing it with the values read
	 * back from the blockdev */
	CU_ASSERT_EQUAL(rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	data_length;
	uint64_t offset;
	int pattern;

	/* Tests the overflow condition of the blockdevs. */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	pattern = 0xA3;

	target = g_current_io_target;
	bdev = target->bdev;

	/* The start offset has been set to a valid value
	 * but offset + nbytes is greater than the total size
	 * of the blockdev, so both I/Os should fail. */
	offset = ((spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev)) - 1024);

	initialize_buffer(&tx_buf, pattern, data_length);
	initialize_buffer(&rx_buf, 0, data_length);

	blockdev_write(target, tx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	spdk_free(rx_buf);
	spdk_free(tx_buf);
}

static void
blockdev_write_read_max_offset(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	/* The start offset has been set to UINT64_MAX such that
	 * adding nbytes wraps around and points to an invalid address. */
	offset = UINT64_MAX;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_overlapped_write_read_8k(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 8K */
	data_length = 8192;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Verify the write by comparing it with the values read
	 * back from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);

	/* Overwrite the range with the pattern 0xBB at an offset that
	 * overlaps the address written above, then verify the new value
	 * in the overlapped address range */
	/* Populate 8k with value 0xBB */
	pattern = 0xBB;
	/* Offset = 4096; overlap the offset ranges and write value 0xbb */
	offset = 4096;
	/* Verify the write by comparing it with the values read
	 * back from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
__blockdev_reset(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_test_reset(void)
{
	struct bdevio_request req;
	struct io_target *target;
	bool reset_supported;

	target = g_current_io_target;
	req.target = target;

	reset_supported = spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_RESET);
	g_completion_success = false;

	execute_spdk_function(__blockdev_reset, &req);

	CU_ASSERT_EQUAL(g_completion_success, reset_supported);
}

struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;
	void *buf;
	uint32_t len;
	struct io_target *target;
	int sct;
	int sc;
	uint32_t cdw0;
};

static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_nvme_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
					&pt_req->cmd, pt_req->buf, pt_req->len,
					nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

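/*
 * The passthru tests preset pt_req.sct/sc to values that the device can
 * never legitimately return for a successful command (vendor-specific
 * status). If submission fails before the completion callback runs, the
 * sentinels survive and the CU_ASSERTs below fail, rather than silently
 * passing on stale "success" status.
 */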
static void
blockdev_test_nvme_passthru_rw(void)
{
	struct bdevio_passthrough_request pt_req;
	void *write_buf, *read_buf;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
	pt_req.cmd.nsid = 1;
	*(uint64_t *)&pt_req.cmd.cdw10 = 4;
	pt_req.cmd.cdw12 = 0;

	pt_req.len = spdk_bdev_get_block_size(target->bdev);
	write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	memset(write_buf, 0xA5, pt_req.len);
	pt_req.buf = write_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
	read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	pt_req.buf = read_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
	spdk_free(read_buf);
	spdk_free(write_buf);
}

static void
blockdev_test_nvme_passthru_vendor_specific(void)
{
	struct bdevio_passthrough_request pt_req;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
	pt_req.cmd.nsid = 1;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	pt_req.cdw0 = 0xbeef;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(pt_req.cdw0 == 0x0);
}

static void
__blockdev_nvme_admin_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
					   &pt_req->cmd, pt_req->buf, pt_req->len,
					   nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_test_nvme_admin_passthru(void)
{
	struct io_target *target;
	struct bdevio_passthrough_request pt_req;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	pt_req.cmd.nsid = 0;
	*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;

	pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
	pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);

	/* Preset sentinel status so a failed submission cannot pass the asserts. */
	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	spdk_free(pt_req.buf);
}

static void
__stop_init_thread(void *arg)
{
	unsigned num_failures = g_num_failures;
	struct spdk_jsonrpc_request *request = arg;

	g_num_failures = 0;

	bdevio_cleanup_targets();
	if (g_wait_for_tests && !g_shutdown) {
		/* Do not stop the app yet, wait for another RPC */
		rpc_perform_tests_cb(num_failures, request);
		return;
	}
	spdk_app_stop(num_failures);
}

static void
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	g_num_failures = num_failures;

	spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
}

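/*
 * One CUnit suite is registered per I/O target (see
 * __setup_ut_on_single_target). suite_init pins the first target and
 * suite_fini advances g_current_io_target, so consecutive suites walk the
 * target list in order.
 */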
static int
suite_init(void)
{
	if (g_current_io_target == NULL) {
		g_current_io_target = g_io_targets;
	}
	return 0;
}

static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}

#define SUITE_NAME_MAX 64

static int
__setup_ut_on_single_target(struct io_target *target)
{
	int rc = 0;
	CU_pSuite suite = NULL;
	char name[SUITE_NAME_MAX];

	snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
	suite = CU_add_suite(name, suite_init, suite_fini);
	if (suite == NULL) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}

	if (
		CU_add_test(suite, "blockdev write read 4k", blockdev_write_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3.5m", blockdev_write_zeroes_read_3m_500k) == NULL
		|| CU_add_test(suite, "blockdev reset",
			       blockdev_test_reset) == NULL
		|| CU_add_test(suite, "blockdev write read 512 bytes",
			       blockdev_write_read_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev write read size > 128k",
			       blockdev_write_read_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev write read invalid size",
			       blockdev_write_read_invalid_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read max offset",
			       blockdev_write_read_max_offset) == NULL
		|| CU_add_test(suite, "blockdev write read 8k on overlapped address offset",
			       blockdev_overlapped_write_read_8k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 4k", blockdev_writev_readv_4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 30 x 4k",
			       blockdev_writev_readv_30x4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 512 bytes",
			       blockdev_writev_readv_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k",
			       blockdev_writev_readv_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
		|| CU_add_test(suite, "blockdev comparev and writev", blockdev_comparev_and_writev) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru rw",
			       blockdev_test_nvme_passthru_rw) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
			       blockdev_test_nvme_passthru_vendor_specific) == NULL
		|| CU_add_test(suite, "blockdev nvme admin passthru",
			       blockdev_test_nvme_admin_passthru) == NULL
	) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}
	return 0;
}

static void
__run_ut_thread(void *arg)
{
	struct spdk_jsonrpc_request *request = arg;
	int rc = 0;
	struct io_target *target;
	unsigned num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		/* CUnit error, probably won't recover */
		rc = CU_get_error();
		stop_init_thread(-rc, request);
		return;
	}

	target = g_io_targets;
	while (target != NULL) {
		rc = __setup_ut_on_single_target(target);
		if (rc < 0) {
			/* CUnit error, probably won't recover */
			stop_init_thread(-rc, request);
			return;
		}
		target = target->next;
	}
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	stop_init_thread(num_failures, request);
}

static void
__construct_targets(void *arg)
{
	if (bdevio_construct_targets() < 0) {
		spdk_app_stop(-1);
		return;
	}

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
}

static void
test_main(void *arg1)
{
	struct spdk_cpuset tmpmask = {};
	uint32_t i;

	pthread_mutex_init(&g_test_mutex, NULL);
	pthread_cond_init(&g_test_cond, NULL);

	/* This test requires at least three cores.
	 * g_thread_init is the app_thread on the main core from the event framework.
	 * The next two threads are only for the tests and should always be on separate CPU cores. */
	if (spdk_env_get_core_count() < 3) {
		spdk_app_stop(-1);
		return;
	}

	SPDK_ENV_FOREACH_CORE(i) {
		if (i == spdk_env_get_current_core()) {
			g_thread_init = spdk_get_thread();
			continue;
		}
		spdk_cpuset_zero(&tmpmask);
		spdk_cpuset_set_cpu(&tmpmask, i, true);
		if (g_thread_ut == NULL) {
			g_thread_ut = spdk_thread_create("ut_thread", &tmpmask);
		} else if (g_thread_io == NULL) {
			g_thread_io = spdk_thread_create("io_thread", &tmpmask);
		}
	}

	if (g_wait_for_tests) {
		/* Do not perform any tests until an RPC is received */
		return;
	}

	spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
}

static void
bdevio_usage(void)
{
	printf(" -w                        start bdevio app and wait for RPC to start the tests\n");
}

static int
bdevio_parse_arg(int ch, char *arg)
{
	switch (ch) {
	case 'w':
		g_wait_for_tests = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

struct rpc_perform_tests {
	char *name;
};

static void
free_rpc_perform_tests(struct rpc_perform_tests *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};

static void
rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	struct spdk_json_write_ctx *w;

	if (num_failures == 0) {
		w = spdk_jsonrpc_begin_result(request);
		spdk_json_write_uint32(w, num_failures);
		spdk_jsonrpc_end_result(request, w);
	} else {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						     "%d test cases failed", num_failures);
	}
}

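/*
 * With the app started with -w, the tests are kicked off via JSON-RPC.
 * A minimal request (assuming the default RPC socket) looks like:
 *
 *   {"jsonrpc": "2.0", "method": "perform_tests", "id": 1}
 *
 * An optional "params": {"name": "<bdev>"} restricts the run to a single
 * bdev instead of all leaf bdevs.
 */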
static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	struct spdk_bdev *bdev;
	int rc;

	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		bdev = spdk_bdev_get_by_name(req.name);
		if (bdev == NULL) {
			SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Bdev '%s' does not exist: %s",
							     req.name, spdk_strerror(ENODEV));
			goto invalid;
		}
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     spdk_bdev_get_name(bdev), spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);

	return;

invalid:
	free_rpc_perform_tests(&req);
}
SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)

static void
spdk_bdevio_shutdown_cb(void)
{
	g_shutdown = true;
	spdk_thread_send_msg(g_thread_init, __stop_init_thread, NULL);
}

int
main(int argc, char **argv)
{
	int			rc;
	struct spdk_app_opts	opts = {};

	spdk_app_opts_init(&opts, sizeof(opts));
	opts.name = "bdevio";
	opts.reactor_mask = "0x7";
	opts.shutdown_cb = spdk_bdevio_shutdown_cb;

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
				      bdevio_parse_arg, bdevio_usage)) !=
	    SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	rc = spdk_app_start(&opts, test_main, NULL);
	spdk_app_fini();

	return rc;
}