xref: /spdk/test/bdev/bdevio/bdevio.c (revision 06b537bfdb4393dea857e204b85d8df46a351d8a)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/accel_engine.h"
#include "spdk/env.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/event.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "CUnit/Basic.h"

#define BUFFER_IOVS		1024
#define BUFFER_SIZE		(260 * 1024)
#define BDEV_TASK_ARRAY_SIZE	2048

pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

static struct spdk_thread *g_thread_init;
static struct spdk_thread *g_thread_ut;
static struct spdk_thread *g_thread_io;
static bool g_wait_for_tests = false;
static int g_num_failures = 0;

struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;
	struct io_target	*next;
};

struct bdevio_request {
	char *buf;
	char *fused_buf;
	int data_len;
	uint64_t offset;
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;
	struct iovec fused_iov[BUFFER_IOVS];
	int fused_iovcnt;
	struct io_target *target;
};

struct io_target *g_io_targets = NULL;
struct io_target *g_current_io_target = NULL;
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);

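/*
 * Hand fn off to the io thread and block the calling (ut) thread until the
 * callback chain ends in wake_ut_thread().  Because g_test_mutex is taken
 * before spdk_thread_send_msg() and only released inside pthread_cond_wait(),
 * the io thread cannot signal g_test_cond before the ut thread is waiting,
 * so the wakeup cannot be lost.
 */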
static void
execute_spdk_function(spdk_msg_fn fn, void *arg)
{
	pthread_mutex_lock(&g_test_mutex);
	spdk_thread_send_msg(g_thread_io, fn, arg);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
__get_io_channel(void *arg)
{
	struct io_target *target = arg;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}

static int
bdevio_construct_target(struct spdk_bdev *bdev)
{
	struct io_target *target;
	int rc;
	uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	target = malloc(sizeof(struct io_target));
	if (target == NULL) {
		return -ENOMEM;
	}

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &target->bdev_desc);
	if (rc != 0) {
		free(target);
		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
		return rc;
	}

	printf("  %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
	       spdk_bdev_get_name(bdev),
	       num_blocks, block_size,
	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

	target->bdev = bdev;
	target->next = g_io_targets;
	execute_spdk_function(__get_io_channel, target);
	g_io_targets = target;

	return 0;
}

static int
bdevio_construct_targets(void)
{
	struct spdk_bdev *bdev;
	int rc;

	printf("I/O targets:\n");

	bdev = spdk_bdev_first_leaf();
	while (bdev != NULL) {
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
			return rc;
		}
		bdev = spdk_bdev_next_leaf(bdev);
	}

	if (g_io_targets == NULL) {
		SPDK_ERRLOG("No bdevs to perform tests on\n");
		return -1;
	}

	return 0;
}

static void
__put_io_channel(void *arg)
{
	struct io_target *target = arg;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}

static void
bdevio_cleanup_targets(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		execute_spdk_function(__put_io_channel, target);
		spdk_bdev_close(target->bdev_desc);
		g_io_targets = target->next;
		free(target);
		target = g_io_targets;
	}
}

static bool g_completion_success;

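/*
 * Test buffers are allocated with spdk_zmalloc() so they are DMA-safe and
 * 4 KiB (0x1000) aligned, satisfying the buffer alignment requirements of
 * NVMe and other physical bdevs.
 */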
static void
initialize_buffer(char **buf, int pattern, int size)
{
	*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	assert(*buf != NULL);
	memset(*buf, pattern, size);
}

static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_write_zeroes(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_compare_and_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt,
			req->fused_iov, req->fused_iovcnt, req->offset, req->data_len, quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

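/*
 * Split req->buf into iovecs of at most iov_len bytes each, capped at
 * BUFFER_IOVS entries.  An iov_len of 0 leaves iovcnt at 0, which makes the
 * I/O helpers below fall back to the single-buffer (non-vectored) bdev API.
 */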
static void
sgl_chop_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->buf;

	req->iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->iov[req->iovcnt].iov_base = buf;
		req->iov[req->iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->fused_buf;

	req->fused_iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->fused_iov[req->fused_iovcnt].iov_base = buf;
		req->fused_iov[req->fused_iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
blockdev_write(struct io_target *target, char *tx_buf,
	       uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_write, &req);
}

static void
_blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf,
			    uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = cmp_buf;
	req.fused_buf = write_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);
	sgl_chop_fused_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_compare_and_write, &req);
}

static void
blockdev_write_zeroes(struct io_target *target, char *tx_buf,
		      uint64_t offset, int data_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;

	g_completion_success = false;

	execute_spdk_function(__blockdev_write_zeroes, &req);
}

static void
__blockdev_read(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_read(struct io_target *target, char *rx_buf,
	      uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = rx_buf;
	req.data_len = data_len;
	req.offset = offset;
	req.iovcnt = 0;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_read, &req);
}

static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int rc;
	rc = memcmp(rx_buf, tx_buf, data_length);

	spdk_free(rx_buf);
	spdk_free(tx_buf);

	return rc;
}

static bool
blockdev_io_valid_blocks(struct spdk_bdev *bdev, uint64_t data_length)
{
	if (data_length < spdk_bdev_get_block_size(bdev) ||
	    data_length % spdk_bdev_get_block_size(bdev) ||
	    data_length / spdk_bdev_get_block_size(bdev) > spdk_bdev_get_num_blocks(bdev)) {
		return false;
	}

	return true;
}

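/*
 * Core write/read/verify helper: write (or write-zeroes) data_length bytes at
 * offset, check the completion status against expected_rc, read the same
 * range back, and memcmp() the two buffers.  Lengths rejected by
 * blockdev_io_valid_blocks() are silently skipped.
 */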
static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes)
{
	struct io_target *target;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;

	target = g_current_io_target;

	if (!blockdev_io_valid_blocks(target->bdev, data_length)) {
		return;
	}

	if (!write_zeroes) {
		initialize_buffer(&tx_buf, pattern, data_length);
		initialize_buffer(&rx_buf, 0, data_length);

		blockdev_write(target, tx_buf, offset, data_length, iov_len);
	} else {
		initialize_buffer(&tx_buf, 0, data_length);
		initialize_buffer(&rx_buf, pattern, data_length);

		blockdev_write_zeroes(target, tx_buf, offset, data_length);
	}

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}
	blockdev_read(target, rx_buf, offset, data_length, iov_len);

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	if (g_completion_success) {
		rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
		/* Assert the write by comparing it with values read
		 * from each blockdev */
		CU_ASSERT_EQUAL(rc, 0);
	}
}

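/*
 * Fused compare-and-write: the first attempt compares against the 0xAA
 * pattern just written and must succeed, replacing the data with 0xBB.
 * The second attempt still compares against 0xAA and must now fail with a
 * miscompare.  The final read verifies that 0xBB was actually written.
 */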
static void
blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset)
{
	struct io_target *target;
	char	*tx_buf = NULL;
	char	*write_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;

	target = g_current_io_target;

	if (!blockdev_io_valid_blocks(target->bdev, data_length)) {
		return;
	}

	initialize_buffer(&tx_buf, 0xAA, data_length);
	initialize_buffer(&rx_buf, 0, data_length);
	initialize_buffer(&write_buf, 0xBB, data_length);

	blockdev_write(target, tx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);
	rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);
}

static void
blockdev_write_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_zeroes_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This I/O will not have to be split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_1m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 1M */
	data_length = 1048576;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This I/O will have to be split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_3m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3M */
	data_length = 3145728;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This I/O will have to be split at the bdev layer if
 * write-zeroes is not supported by the bdev. It also
 * tests a write size that is not an even multiple of
 * the bdev layer's zero buffer size.
 */
static void
blockdev_write_zeroes_read_3m_500k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3.5M */
	data_length = 3670016;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

static void
blockdev_writev_readv_4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_comparev_and_writev(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;

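	/* Note: a 1-byte request fails the blockdev_io_valid_blocks() check
	 * for any bdev whose block size is larger than one byte, so on such
	 * bdevs blockdev_compare_and_write() returns without issuing I/O. */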
	data_length = 1;
	iov_len = 1;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;

	blockdev_compare_and_write(data_length, iov_len, offset);
}

static void
blockdev_writev_readv_30x4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 30 x 4K = 120K */
	data_length = 4096 * 30;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_512Bytes(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_512Bytes(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	iov_len = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_size_gt_128k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k_two_iov(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 128 * 1024;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_invalid_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size is not a multiple of the block size */
	data_length = 0x1015;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	uint64_t offset;
	uint32_t block_size;
	int rc;

	target = g_current_io_target;
	bdev = target->bdev;

	block_size = spdk_bdev_get_block_size(bdev);

	/* Set the start offset so that offset + nbytes equals
	 * the total size of the blockdev. */
	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);

	initialize_buffer(&tx_buf, 0xA3, block_size);
	initialize_buffer(&rx_buf, 0, block_size);

	blockdev_write(target, tx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	blockdev_read(target, rx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	data_length;
	uint64_t offset;
	int pattern;

	/* Tests the overflow condition of the blockdevs. */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	pattern = 0xA3;

	target = g_current_io_target;
	bdev = target->bdev;

	/* The start offset is valid, but offset + nbytes is greater
	 * than the total size of the blockdev, so the I/O should fail. */
	offset = ((spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev)) - 1024);

	initialize_buffer(&tx_buf, pattern, data_length);
	initialize_buffer(&rx_buf, 0, data_length);

	blockdev_write(target, tx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);
}

static void
blockdev_write_read_max_offset(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	/* The start offset has been set to UINT64_MAX such that
	 * adding nbytes wraps around and points to an invalid address. */
	offset = UINT64_MAX;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_overlapped_write_read_8k(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 8K */
	data_length = 8192;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);

	/* Overwrite 8K with the pattern 0xBB at an offset that overlaps the
	 * range written above, then assert the new value over the overlapped
	 * address range */
	pattern = 0xBB;
	/* Offset = 4096 overlaps the second half of the previous 8K write */
	offset = 4096;
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
__blockdev_reset(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_test_reset(void)
{
	struct bdevio_request req;
	struct io_target *target;

	target = g_current_io_target;
	req.target = target;

	g_completion_success = false;

	execute_spdk_function(__blockdev_reset, &req);

	/* Workaround: NVMe-oF target doesn't support reset yet - so for now
	 *  don't fail the test if it's an NVMe bdev.
	 */
	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	}
}

struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;
	void *buf;
	uint32_t len;
	struct io_target *target;
	int sct;
	int sc;
	uint32_t cdw0;
};

static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_nvme_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
					&pt_req->cmd, pt_req->buf, pt_req->len,
					nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_test_nvme_passthru_rw(void)
{
	struct bdevio_passthrough_request pt_req;
	void *write_buf, *read_buf;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
	pt_req.cmd.nsid = 1;
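	/* cdw10/cdw11 together hold the 64-bit starting LBA (4 here); cdw12
	 * bits 15:0 are the zero-based number of logical blocks, so 0 means
	 * a single block. */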
	*(uint64_t *)&pt_req.cmd.cdw10 = 4;
	pt_req.cmd.cdw12 = 0;

	pt_req.len = spdk_bdev_get_block_size(target->bdev);
	write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	memset(write_buf, 0xA5, pt_req.len);
	pt_req.buf = write_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
	read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	pt_req.buf = read_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
	spdk_free(read_buf);
	spdk_free(write_buf);
}

static void
blockdev_test_nvme_passthru_vendor_specific(void)
{
	struct bdevio_passthrough_request pt_req;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
	pt_req.cmd.nsid = 1;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	pt_req.cdw0 = 0xbeef;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(pt_req.cdw0 == 0x0);
}

static void
__blockdev_nvme_admin_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
					   &pt_req->cmd, pt_req->buf, pt_req->len,
					   nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_test_nvme_admin_passthru(void)
{
	struct io_target *target;
	struct bdevio_passthrough_request pt_req;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	pt_req.cmd.nsid = 0;
	*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;

	pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
	pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);

	pt_req.sct = SPDK_NVME_SCT_GENERIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	spdk_free(pt_req.buf);
}

static void
__stop_init_thread(void *arg)
{
	unsigned num_failures = g_num_failures;
	struct spdk_jsonrpc_request *request = arg;

	g_num_failures = 0;

	bdevio_cleanup_targets();
	if (g_wait_for_tests) {
		/* Do not stop the app yet, wait for another RPC */
		rpc_perform_tests_cb(num_failures, request);
		return;
	}
	spdk_app_stop(num_failures);
}

static void
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	g_num_failures = num_failures;

	spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
}

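/*
 * One CUnit suite is registered per target (see __setup_ut_on_single_target
 * below) and CUnit runs suites in registration order, so suite_init and
 * suite_fini walk g_current_io_target down the g_io_targets list, one entry
 * per suite.
 */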
static int
suite_init(void)
{
	if (g_current_io_target == NULL) {
		g_current_io_target = g_io_targets;
	}
	return 0;
}

static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}

#define SUITE_NAME_MAX 64

static int
__setup_ut_on_single_target(struct io_target *target)
{
	int rc = 0;
	CU_pSuite suite = NULL;
	char name[SUITE_NAME_MAX];

	snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
	suite = CU_add_suite(name, suite_init, suite_fini);
	if (suite == NULL) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}

	if (
		CU_add_test(suite, "blockdev write read 4k", blockdev_write_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3.5m", blockdev_write_zeroes_read_3m_500k) == NULL
		|| CU_add_test(suite, "blockdev reset",
			       blockdev_test_reset) == NULL
		|| CU_add_test(suite, "blockdev write read 512 bytes",
			       blockdev_write_read_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev write read size > 128k",
			       blockdev_write_read_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev write read invalid size",
			       blockdev_write_read_invalid_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read max offset",
			       blockdev_write_read_max_offset) == NULL
		|| CU_add_test(suite, "blockdev write read 8k on overlapped address offset",
			       blockdev_overlapped_write_read_8k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 4k", blockdev_writev_readv_4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 30 x 4k",
			       blockdev_writev_readv_30x4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 512 bytes",
			       blockdev_writev_readv_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k",
			       blockdev_writev_readv_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
		|| CU_add_test(suite, "blockdev comparev and writev", blockdev_comparev_and_writev) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru rw",
			       blockdev_test_nvme_passthru_rw) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
			       blockdev_test_nvme_passthru_vendor_specific) == NULL
		|| CU_add_test(suite, "blockdev nvme admin passthru",
			       blockdev_test_nvme_admin_passthru) == NULL
	) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}
	return 0;
}

static void
__run_ut_thread(void *arg)
{
	struct spdk_jsonrpc_request *request = arg;
	int rc = 0;
	struct io_target *target;
	unsigned num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		/* CUnit error, probably won't recover */
		rc = CU_get_error();
		stop_init_thread(-rc, request);
		return;
	}

	target = g_io_targets;
	while (target != NULL) {
		rc = __setup_ut_on_single_target(target);
		if (rc < 0) {
			/* CUnit error, probably won't recover */
			stop_init_thread(-rc, request);
			return;
		}
		target = target->next;
	}
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	stop_init_thread(num_failures, request);
}

static void
__construct_targets(void *arg)
{
	if (bdevio_construct_targets() < 0) {
		spdk_app_stop(-1);
		return;
	}

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
}

static void
test_main(void *arg1)
{
	struct spdk_cpuset tmpmask = {};
	const struct spdk_cpuset *appmask;
	uint32_t cpu, init_cpu;

	pthread_mutex_init(&g_test_mutex, NULL);
	pthread_cond_init(&g_test_cond, NULL);

	appmask = spdk_app_get_core_mask();

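	/* Three cores are required: the current (init) core plus one
	 * dedicated core each for the ut thread and the io thread created
	 * below. */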
	if (spdk_cpuset_count(appmask) < 3) {
		spdk_app_stop(-1);
		return;
	}

	init_cpu = spdk_env_get_current_core();
	g_thread_init = spdk_get_thread();

	for (cpu = 0; cpu < SPDK_ENV_LCORE_ID_ANY; cpu++) {
		if (cpu != init_cpu && spdk_cpuset_get_cpu(appmask, cpu)) {
			spdk_cpuset_zero(&tmpmask);
			spdk_cpuset_set_cpu(&tmpmask, cpu, true);
			g_thread_ut = spdk_thread_create("ut_thread", &tmpmask);
			break;
		}
	}

	if (cpu == SPDK_ENV_LCORE_ID_ANY) {
		spdk_app_stop(-1);
		return;
	}

	for (cpu++; cpu < SPDK_ENV_LCORE_ID_ANY; cpu++) {
		if (cpu != init_cpu && spdk_cpuset_get_cpu(appmask, cpu)) {
			spdk_cpuset_zero(&tmpmask);
			spdk_cpuset_set_cpu(&tmpmask, cpu, true);
			g_thread_io = spdk_thread_create("io_thread", &tmpmask);
			break;
		}
	}

	if (cpu == SPDK_ENV_LCORE_ID_ANY) {
		spdk_app_stop(-1);
		return;
	}

	if (g_wait_for_tests) {
		/* Do not perform any tests until RPC is received */
		return;
	}

	spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
}

static void
bdevio_usage(void)
{
	printf(" -w                        start bdevio app and wait for RPC to start the tests\n");
}

static int
bdevio_parse_arg(int ch, char *arg)
{
	switch (ch) {
	case 'w':
		g_wait_for_tests = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

struct rpc_perform_tests {
	char *name;
};

static void
free_rpc_perform_tests(struct rpc_perform_tests *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};

static void
rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	struct spdk_json_write_ctx *w;

	if (num_failures == 0) {
		w = spdk_jsonrpc_begin_result(request);
		spdk_json_write_uint32(w, num_failures);
		spdk_jsonrpc_end_result(request, w);
	} else {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						     "%u test cases failed", num_failures);
	}
}

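/*
 * RPC entry point used together with -w: construct targets (optionally just
 * the bdev named by the "name" parameter) and kick off the CUnit run on the
 * ut thread.  A client would issue something like
 *     rpc.py perform_tests
 * against the running app (illustrative invocation; the exact client-side
 * wiring is outside this file).
 */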
static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	struct spdk_bdev *bdev;
	int rc;

	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		bdev = spdk_bdev_get_by_name(req.name);
		if (bdev == NULL) {
			SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Bdev '%s' does not exist: %s",
							     req.name, spdk_strerror(ENODEV));
			goto invalid;
		}
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     spdk_bdev_get_name(bdev), spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);

	return;

invalid:
	free_rpc_perform_tests(&req);
}
SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)

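/*
 * The default reactor_mask of 0x7 reserves the three cores that test_main()
 * requires.  With -w the app starts idle and waits for the perform_tests RPC.
 */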
int
main(int argc, char **argv)
{
	int			rc;
	struct spdk_app_opts	opts = {};

	spdk_app_opts_init(&opts);
	opts.name = "bdevio";
	opts.reactor_mask = "0x7";

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
				      bdevio_parse_arg, bdevio_usage)) !=
	    SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	rc = spdk_app_start(&opts, test_main, NULL);
	spdk_app_fini();

	return rc;
}