xref: /spdk/test/bdev/bdevio/bdevio.c (revision 7aa2cc29c0b532d2ec949c8d0c6084df9a3d6cab)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2016 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/bdev.h"
10 #include "spdk/accel.h"
11 #include "spdk/env.h"
12 #include "spdk/log.h"
13 #include "spdk/thread.h"
14 #include "spdk/event.h"
15 #include "spdk/rpc.h"
16 #include "spdk/util.h"
17 #include "spdk/string.h"
18 
19 #include "bdev_internal.h"
20 #include "CUnit/Basic.h"
21 
/* Max iovec entries a chopped test buffer may use. */
#define BUFFER_IOVS		1024
/* Test buffer size in bytes; parenthesized so the macro expands safely
 * inside any surrounding expression. */
#define BUFFER_SIZE		(260 * 1024)
#define BDEV_TASK_ARRAY_SIZE	2048
25 
/* Handshake between the CUnit (UT) thread and the I/O thread:
 * execute_spdk_function() waits on the condvar until wake_ut_thread()
 * signals it from the io thread. */
pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

static struct spdk_thread *g_thread_init;	/* app init/fini thread */
static struct spdk_thread *g_thread_ut;		/* thread running the CUnit tests */
static struct spdk_thread *g_thread_io;		/* thread that submits all bdev I/O */
static bool g_wait_for_tests = false;		/* presumably: tests started via RPC instead of immediately -- confirm with main() */
static int g_num_failures = 0;			/* accumulated CUnit failure count */
static bool g_shutdown = false;
35 
/* One bdev under test: the open descriptor plus the channel obtained on
 * the io thread.  Targets are kept in the g_io_targets singly-linked list. */
struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;	/* obtained/released on g_thread_io */
	struct io_target	*next;	/* g_io_targets list linkage */
};
42 
/* Context for a single test I/O, passed from the UT thread to the io thread
 * via execute_spdk_function().  The struct lives on the caller's stack for
 * the duration of the (synchronous) request. */
struct bdevio_request {
	char *buf;			/* contiguous data buffer (tx or rx) */
	char *fused_buf;		/* write-side buffer for compare-and-write */
	int data_len;			/* total transfer size in bytes */
	uint64_t offset;		/* byte offset into the bdev (dst offset for copy) */
	struct iovec iov[BUFFER_IOVS];	/* optional scatter list over buf */
	int iovcnt;			/* 0 => submit via the contiguous-buffer API */
	struct iovec fused_iov[BUFFER_IOVS];	/* scatter list over fused_buf */
	int fused_iovcnt;
	struct io_target *target;
	uint64_t src_offset;		/* copy source offset in bytes */
};
55 
struct io_target *g_io_targets = NULL;		/* list of all constructed targets */
struct io_target *g_current_io_target = NULL;	/* target the running test operates on */
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);
59 
/*
 * Run fn(arg) on the io thread and block the calling (UT) thread until the
 * io-thread side calls wake_ut_thread().  The mutex is taken before the
 * message is sent and released atomically by pthread_cond_wait(), so the
 * wakeup signal cannot fire before this thread is waiting.
 *
 * NOTE(review): the wait is not wrapped in a predicate loop, so a spurious
 * wakeup would return early -- this relies on wakeups only ever coming from
 * wake_ut_thread().
 */
static void
execute_spdk_function(spdk_msg_fn fn, void *arg)
{
	pthread_mutex_lock(&g_test_mutex);
	spdk_thread_send_msg(g_thread_io, fn, arg);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}
68 
/*
 * Signal the UT thread blocked in execute_spdk_function().  Signaling under
 * the mutex guarantees the waiter is either already waiting or has not yet
 * released the mutex, so the wakeup is never lost.
 */
static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}
76 
/* Runs on g_thread_io: mark the io thread as exited, then release the
 * UT thread that requested the exit. */
static void
__exit_io_thread(void *arg)
{
	assert(spdk_get_thread() == g_thread_io);
	spdk_thread_exit(g_thread_io);
	wake_ut_thread();
}
84 
/* Runs on g_thread_io: obtain the target's I/O channel (channels belong to
 * the thread that gets them, so this must happen on the io thread). */
static void
__get_io_channel(void *arg)
{
	struct io_target *target = arg;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}
94 
/* Event callback required by spdk_bdev_open_ext(); bdevio ignores all
 * bdev events. */
static void
bdevio_construct_target_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				void *event_ctx)
{
}
100 
101 static int
102 bdevio_construct_target(struct spdk_bdev *bdev)
103 {
104 	struct io_target *target;
105 	int rc;
106 	uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
107 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
108 
109 	target = malloc(sizeof(struct io_target));
110 	if (target == NULL) {
111 		return -ENOMEM;
112 	}
113 
114 	rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), true, bdevio_construct_target_open_cb, NULL,
115 				&target->bdev_desc);
116 	if (rc != 0) {
117 		free(target);
118 		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
119 		return rc;
120 	}
121 
122 	printf("  %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
123 	       spdk_bdev_get_name(bdev),
124 	       num_blocks, block_size,
125 	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));
126 
127 	target->bdev = bdev;
128 	target->next = g_io_targets;
129 	execute_spdk_function(__get_io_channel, target);
130 	g_io_targets = target;
131 
132 	return 0;
133 }
134 
135 static int
136 bdevio_construct_targets(void)
137 {
138 	struct spdk_bdev *bdev;
139 	int rc;
140 
141 	printf("I/O targets:\n");
142 
143 	bdev = spdk_bdev_first_leaf();
144 	while (bdev != NULL) {
145 		rc = bdevio_construct_target(bdev);
146 		if (rc < 0) {
147 			SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
148 			return rc;
149 		}
150 		bdev = spdk_bdev_next_leaf(bdev);
151 	}
152 
153 	if (g_io_targets == NULL) {
154 		SPDK_ERRLOG("No bdevs to perform tests on\n");
155 		return -1;
156 	}
157 
158 	return 0;
159 }
160 
/* Runs on g_thread_io: release the target's I/O channel on the same thread
 * that obtained it. */
static void
__put_io_channel(void *arg)
{
	struct io_target *target = arg;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}
169 
170 static void
171 bdevio_cleanup_targets(void)
172 {
173 	struct io_target *target;
174 
175 	target = g_io_targets;
176 	while (target != NULL) {
177 		execute_spdk_function(__put_io_channel, target);
178 		spdk_bdev_close(target->bdev_desc);
179 		g_io_targets = target->next;
180 		free(target);
181 		target = g_io_targets;
182 	}
183 }
184 
static bool g_completion_success;	/* outcome of the most recent I/O, set by completion callbacks */
186 
187 static void
188 initialize_buffer(char **buf, int pattern, int size)
189 {
190 	*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
191 	memset(*buf, pattern, size);
192 }
193 
/* Generic I/O completion callback: record success/failure, free the bdev_io,
 * and release the UT thread blocked in execute_spdk_function(). */
static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}
201 
/* Convert a byte count into blocks for the given bdev; the byte count is
 * required to be block aligned. */
static uint64_t
bdev_bytes_to_blocks(struct spdk_bdev *bdev, uint64_t bytes)
{
	uint32_t blocklen = spdk_bdev_get_block_size(bdev);

	CU_ASSERT(bytes % blocklen == 0);
	return bytes / blocklen;
}
210 
211 static void
212 __blockdev_write(void *arg)
213 {
214 	struct bdevio_request *req = arg;
215 	struct io_target *target = req->target;
216 	int rc;
217 
218 	if (req->iovcnt) {
219 		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
220 				      req->data_len, quick_test_complete, NULL);
221 	} else {
222 		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
223 				     req->data_len, quick_test_complete, NULL);
224 	}
225 
226 	if (rc) {
227 		g_completion_success = false;
228 		wake_ut_thread();
229 	}
230 }
231 
/*
 * Runs on the io thread: submit a write_zeroes for the range described by
 * req.  On submission failure nothing will complete, so record failure and
 * wake the UT thread here.
 */
static void
__blockdev_write_zeroes(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
246 
/*
 * Runs on the io thread: submit a fused compare-and-write.  req->iov holds
 * the compare data and req->fused_iov the data to write if the compare
 * matches.  Byte offsets/lengths are converted to blocks for the API.
 * On submission failure, record it and wake the UT thread directly.
 */
static void
__blockdev_compare_and_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	struct spdk_bdev *bdev = target->bdev;
	int rc;

	rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt,
			req->fused_iov, req->fused_iovcnt, bdev_bytes_to_blocks(bdev, req->offset),
			bdev_bytes_to_blocks(bdev, req->data_len), quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
264 
265 static void
266 sgl_chop_buffer(struct bdevio_request *req, int iov_len)
267 {
268 	int data_len = req->data_len;
269 	char *buf = req->buf;
270 
271 	req->iovcnt = 0;
272 	if (!iov_len) {
273 		return;
274 	}
275 
276 	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
277 		if (data_len < iov_len) {
278 			iov_len = data_len;
279 		}
280 
281 		req->iov[req->iovcnt].iov_base = buf;
282 		req->iov[req->iovcnt].iov_len = iov_len;
283 
284 		buf += iov_len;
285 		data_len -= iov_len;
286 	}
287 
288 	CU_ASSERT_EQUAL_FATAL(data_len, 0);
289 }
290 
291 static void
292 sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len)
293 {
294 	int data_len = req->data_len;
295 	char *buf = req->fused_buf;
296 
297 	req->fused_iovcnt = 0;
298 	if (!iov_len) {
299 		return;
300 	}
301 
302 	for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) {
303 		if (data_len < iov_len) {
304 			iov_len = data_len;
305 		}
306 
307 		req->fused_iov[req->fused_iovcnt].iov_base = buf;
308 		req->fused_iov[req->fused_iovcnt].iov_len = iov_len;
309 
310 		buf += iov_len;
311 		data_len -= iov_len;
312 	}
313 
314 	CU_ASSERT_EQUAL_FATAL(data_len, 0);
315 }
316 
317 static void
318 blockdev_write(struct io_target *target, char *tx_buf,
319 	       uint64_t offset, int data_len, int iov_len)
320 {
321 	struct bdevio_request req;
322 
323 	req.target = target;
324 	req.buf = tx_buf;
325 	req.data_len = data_len;
326 	req.offset = offset;
327 	sgl_chop_buffer(&req, iov_len);
328 
329 	g_completion_success = false;
330 
331 	execute_spdk_function(__blockdev_write, &req);
332 }
333 
334 static void
335 _blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf,
336 			    uint64_t offset, int data_len, int iov_len)
337 {
338 	struct bdevio_request req;
339 
340 	req.target = target;
341 	req.buf = cmp_buf;
342 	req.fused_buf = write_buf;
343 	req.data_len = data_len;
344 	req.offset = offset;
345 	sgl_chop_buffer(&req, iov_len);
346 	sgl_chop_fused_buffer(&req, iov_len);
347 
348 	g_completion_success = false;
349 
350 	execute_spdk_function(__blockdev_compare_and_write, &req);
351 }
352 
353 static void
354 blockdev_write_zeroes(struct io_target *target, char *tx_buf,
355 		      uint64_t offset, int data_len)
356 {
357 	struct bdevio_request req;
358 
359 	req.target = target;
360 	req.buf = tx_buf;
361 	req.data_len = data_len;
362 	req.offset = offset;
363 
364 	g_completion_success = false;
365 
366 	execute_spdk_function(__blockdev_write_zeroes, &req);
367 }
368 
369 static void
370 __blockdev_read(void *arg)
371 {
372 	struct bdevio_request *req = arg;
373 	struct io_target *target = req->target;
374 	int rc;
375 
376 	if (req->iovcnt) {
377 		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
378 				     req->data_len, quick_test_complete, NULL);
379 	} else {
380 		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
381 				    req->data_len, quick_test_complete, NULL);
382 	}
383 
384 	if (rc) {
385 		g_completion_success = false;
386 		wake_ut_thread();
387 	}
388 }
389 
390 static void
391 blockdev_read(struct io_target *target, char *rx_buf,
392 	      uint64_t offset, int data_len, int iov_len)
393 {
394 	struct bdevio_request req;
395 
396 	req.target = target;
397 	req.buf = rx_buf;
398 	req.data_len = data_len;
399 	req.offset = offset;
400 	req.iovcnt = 0;
401 	sgl_chop_buffer(&req, iov_len);
402 
403 	g_completion_success = false;
404 
405 	execute_spdk_function(__blockdev_read, &req);
406 }
407 
/*
 * Runs on the io thread: submit a copy from req->src_offset to req->offset
 * (byte offsets converted to blocks).  On submission failure nothing will
 * complete, so record failure and wake the UT thread here.
 */
static void
_blockdev_copy(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	struct spdk_bdev *bdev = target->bdev;
	int rc;

	rc = spdk_bdev_copy_blocks(target->bdev_desc, target->ch,
				   bdev_bytes_to_blocks(bdev, req->offset),
				   bdev_bytes_to_blocks(bdev, req->src_offset),
				   bdev_bytes_to_blocks(bdev, req->data_len),
				   quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
427 
428 static void
429 blockdev_copy(struct io_target *target, uint64_t dst_offset, uint64_t src_offset, int data_len)
430 {
431 	struct bdevio_request req;
432 
433 	req.target = target;
434 	req.data_len = data_len;
435 	req.offset = dst_offset;
436 	req.src_offset = src_offset;
437 
438 	g_completion_success = false;
439 
440 	execute_spdk_function(_blockdev_copy, &req);
441 }
442 
/*
 * Compare data_length bytes of rx_buf against tx_buf.
 * Returns 0 on an exact match, non-zero otherwise (memcmp semantics).
 */
static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int diff;

	diff = memcmp(rx_buf, tx_buf, data_length);
	return diff;
}
448 
/*
 * Core write-then-read verification helper used by most bdevio tests.
 *
 * Writes 'pattern' (or zeroes when write_zeroes is set) to the current
 * target at [offset, offset + data_length), reads the range back and
 * verifies the data matches.
 *
 * data_length:  transfer size in bytes
 * iov_len:      non-zero => chop buffers into iovecs of this many bytes
 * pattern:      fill byte for the written data; in the write_zeroes case it
 *               pre-fills the rx buffer so zeroed data is distinguishable
 * offset:       byte offset into the bdev
 * expected_rc:  0 => write and read must succeed; non-zero => both must fail
 * write_zeroes: use the write_zeroes API instead of a pattern write
 */
static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes)
{
	struct io_target *target;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;
	uint64_t write_offset = offset;
	uint32_t write_data_len = data_length;

	target = g_current_io_target;

	/* For bdevs with a write unit size > 1 block, grow the write so it is
	 * aligned to and a multiple of the write unit size.  The read below
	 * still uses the caller's original offset/length. */
	if (spdk_bdev_get_write_unit_size(target->bdev) > 1 && expected_rc == 0) {
		uint32_t write_unit_bytes;

		write_unit_bytes = spdk_bdev_get_write_unit_size(target->bdev) *
				   spdk_bdev_get_block_size(target->bdev);
		write_offset -= offset % write_unit_bytes;
		write_data_len += (offset - write_offset);

		if (write_data_len % write_unit_bytes) {
			write_data_len += write_unit_bytes - write_data_len % write_unit_bytes;
		}
	}

	if (!write_zeroes) {
		initialize_buffer(&tx_buf, pattern, write_data_len);
		initialize_buffer(&rx_buf, 0, data_length);

		blockdev_write(target, tx_buf, write_offset, write_data_len, iov_len);
	} else {
		initialize_buffer(&tx_buf, 0, write_data_len);
		initialize_buffer(&rx_buf, pattern, data_length);

		blockdev_write_zeroes(target, tx_buf, write_offset, write_data_len);
	}


	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}
	blockdev_read(target, rx_buf, offset, data_length, iov_len);

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	if (g_completion_success) {
		/* Compare only the caller's range; tx_buf may begin before
		 * 'offset' because of the write unit alignment above. */
		rc = blockdev_write_read_data_match(rx_buf, tx_buf + (offset - write_offset), data_length);
		/* Assert the write by comparing it with values read
		 * from each blockdev */
		CU_ASSERT_EQUAL(rc, 0);
	}

	spdk_free(rx_buf);
	spdk_free(tx_buf);
}
511 
/*
 * Verify fused compare-and-write: seed the range with 0xAA, then a fused
 * compare(0xAA)/write(0xBB) must succeed; the identical fused request
 * repeated must fail (the data is now 0xBB, so the compare mismatches);
 * finally read back and verify the range holds the written 0xBB pattern.
 */
static void
blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset)
{
	struct io_target *target;
	char	*tx_buf = NULL;
	char	*write_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;

	target = g_current_io_target;

	initialize_buffer(&tx_buf, 0xAA, data_length);
	initialize_buffer(&rx_buf, 0, data_length);
	initialize_buffer(&write_buf, 0xBB, data_length);

	blockdev_write(target, tx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	/* Second attempt must fail: on-disk data no longer matches tx_buf. */
	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);
	rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);

	spdk_free(rx_buf);
	spdk_free(tx_buf);
	spdk_free(write_buf);
}
547 
548 static void
549 blockdev_write_read_block(void)
550 {
551 	uint32_t data_length;
552 	uint64_t offset;
553 	int pattern;
554 	int expected_rc;
555 	struct io_target *target = g_current_io_target;
556 	struct spdk_bdev *bdev = target->bdev;
557 
558 	/* Data size = 1 block */
559 	data_length = spdk_bdev_get_block_size(bdev);
560 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
561 	offset = 0;
562 	pattern = 0xA3;
563 	/* Params are valid, hence the expected return value
564 	 * of write and read for all blockdevs is 0. */
565 	expected_rc = 0;
566 
567 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
568 }
569 
570 static void
571 blockdev_write_zeroes_read_block(void)
572 {
573 	uint32_t data_length;
574 	uint64_t offset;
575 	int pattern;
576 	int expected_rc;
577 	struct io_target *target = g_current_io_target;
578 	struct spdk_bdev *bdev = target->bdev;
579 
580 	/* Data size = 1 block */
581 	data_length = spdk_bdev_get_block_size(bdev);
582 	offset = 0;
583 	pattern = 0xA3;
584 	/* Params are valid, hence the expected return value
585 	 * of write_zeroes and read for all blockdevs is 0. */
586 	expected_rc = 0;
587 
588 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
589 }
590 
591 /*
592  * This i/o will not have to split at the bdev layer.
593  */
594 static void
595 blockdev_write_zeroes_read_no_split(void)
596 {
597 	uint32_t data_length;
598 	uint64_t offset;
599 	int pattern;
600 	int expected_rc;
601 	struct io_target *target = g_current_io_target;
602 	struct spdk_bdev *bdev = target->bdev;
603 
604 	/* Data size = block size aligned ZERO_BUFFER_SIZE */
605 	data_length = ZERO_BUFFER_SIZE; /* from bdev_internal.h */
606 	data_length -= ZERO_BUFFER_SIZE % spdk_bdev_get_block_size(bdev);
607 	offset = 0;
608 	pattern = 0xA3;
609 	/* Params are valid, hence the expected return value
610 	 * of write_zeroes and read for all blockdevs is 0. */
611 	expected_rc = 0;
612 
613 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
614 }
615 
616 /*
617  * This i/o will have to split at the bdev layer if
618  * write-zeroes is not supported by the bdev.
619  */
620 static void
621 blockdev_write_zeroes_read_split(void)
622 {
623 	uint32_t data_length;
624 	uint64_t offset;
625 	int pattern;
626 	int expected_rc;
627 	struct io_target *target = g_current_io_target;
628 	struct spdk_bdev *bdev = target->bdev;
629 
630 	/* Data size = block size aligned 3 * ZERO_BUFFER_SIZE */
631 	data_length = 3 * ZERO_BUFFER_SIZE; /* from bdev_internal.h */
632 	data_length -= data_length % spdk_bdev_get_block_size(bdev);
633 	offset = 0;
634 	pattern = 0xA3;
635 	/* Params are valid, hence the expected return value
636 	 * of write_zeroes and read for all blockdevs is 0. */
637 	expected_rc = 0;
638 
639 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
640 }
641 
642 /*
643  * This i/o will have to split at the bdev layer if
644  * write-zeroes is not supported by the bdev. It also
645  * tests a write size that is not an even multiple of
646  * the bdev layer zero buffer size.
647  */
648 static void
649 blockdev_write_zeroes_read_split_partial(void)
650 {
651 	uint32_t data_length;
652 	uint64_t offset;
653 	int pattern;
654 	int expected_rc;
655 	struct io_target *target = g_current_io_target;
656 	struct spdk_bdev *bdev = target->bdev;
657 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
658 
659 	/* Data size = block size aligned 7 * ZERO_BUFFER_SIZE / 2 */
660 	data_length = ZERO_BUFFER_SIZE * 7 / 2;
661 	data_length -= data_length % block_size;
662 	offset = 0;
663 	pattern = 0xA3;
664 	/* Params are valid, hence the expected return value
665 	 * of write_zeroes and read for all blockdevs is 0. */
666 	expected_rc = 0;
667 
668 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
669 }
670 
671 static void
672 blockdev_writev_readv_block(void)
673 {
674 	uint32_t data_length, iov_len;
675 	uint64_t offset;
676 	int pattern;
677 	int expected_rc;
678 	struct io_target *target = g_current_io_target;
679 	struct spdk_bdev *bdev = target->bdev;
680 
681 	/* Data size = 1 block */
682 	data_length = spdk_bdev_get_block_size(bdev);
683 	iov_len = data_length;
684 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
685 	offset = 0;
686 	pattern = 0xA3;
687 	/* Params are valid, hence the expected return value
688 	 * of write and read for all blockdevs is 0. */
689 	expected_rc = 0;
690 
691 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
692 }
693 
/*
 * Exercise fused compare-and-write with a single iovec sized to the bdev's
 * atomic compare-and-write unit (ACWU).  Skipped for bdevs with separate
 * metadata, which the bdev layer does not support for this operation yet.
 */
static void
blockdev_comparev_and_writev(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	if (spdk_bdev_is_md_separate(bdev)) {
		/* TODO: remove this check once bdev layer properly supports
		 * compare and write for bdevs with separate md.
		 */
		SPDK_ERRLOG("skipping comparev_and_writev on bdev %s since it has\n"
			    "separate metadata which is not supported yet.\n",
			    spdk_bdev_get_name(bdev));
		return;
	}

	/* Data size = acwu size */
	data_length = spdk_bdev_get_block_size(bdev) * spdk_bdev_get_acwu(bdev);
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;

	blockdev_compare_and_write(data_length, iov_len, offset);
}
720 
721 static void
722 blockdev_writev_readv_30x1block(void)
723 {
724 	uint32_t data_length, iov_len;
725 	uint64_t offset;
726 	int pattern;
727 	int expected_rc;
728 	struct io_target *target = g_current_io_target;
729 	struct spdk_bdev *bdev = target->bdev;
730 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
731 
732 	/* Data size = 30 * block size */
733 	data_length = block_size * 30;
734 	iov_len = block_size;
735 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
736 	offset = 0;
737 	pattern = 0xA3;
738 	/* Params are valid, hence the expected return value
739 	 * of write and read for all blockdevs is 0. */
740 	expected_rc = 0;
741 
742 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
743 }
744 
745 static void
746 blockdev_write_read_8blocks(void)
747 {
748 	uint32_t data_length;
749 	uint64_t offset;
750 	int pattern;
751 	int expected_rc;
752 	struct io_target *target = g_current_io_target;
753 	struct spdk_bdev *bdev = target->bdev;
754 
755 	/* Data size = 8 * block size */
756 	data_length = spdk_bdev_get_block_size(bdev) * 8;
757 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
758 	offset = data_length;
759 	pattern = 0xA3;
760 	/* Params are valid, hence the expected return value
761 	 * of write and read for all blockdevs is 0. */
762 	expected_rc = 0;
763 
764 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
765 }
766 
767 static void
768 blockdev_writev_readv_8blocks(void)
769 {
770 	uint32_t data_length, iov_len;
771 	uint64_t offset;
772 	int pattern;
773 	int expected_rc;
774 	struct io_target *target = g_current_io_target;
775 	struct spdk_bdev *bdev = target->bdev;
776 
777 	/* Data size = 8 * block size */
778 	data_length = spdk_bdev_get_block_size(bdev) * 8;
779 	iov_len = data_length;
780 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
781 	offset = data_length;
782 	pattern = 0xA3;
783 	/* Params are valid, hence the expected return value
784 	 * of write and read for all blockdevs is 0. */
785 	expected_rc = 0;
786 
787 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
788 }
789 
790 static void
791 blockdev_write_read_size_gt_128k(void)
792 {
793 	uint32_t data_length;
794 	uint64_t offset;
795 	int pattern;
796 	int expected_rc;
797 	struct io_target *target = g_current_io_target;
798 	struct spdk_bdev *bdev = target->bdev;
799 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
800 
801 	/* Data size = block size aligned 128K + 1 block */
802 	data_length = 128 * 1024;
803 	data_length -= data_length % block_size;
804 	data_length += block_size;
805 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
806 	offset = block_size * 2;
807 	pattern = 0xA3;
808 	/* Params are valid, hence the expected return value
809 	 * of write and read for all blockdevs is 0. */
810 	expected_rc = 0;
811 
812 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
813 }
814 
815 static void
816 blockdev_writev_readv_size_gt_128k(void)
817 {
818 	uint32_t data_length, iov_len;
819 	uint64_t offset;
820 	int pattern;
821 	int expected_rc;
822 	struct io_target *target = g_current_io_target;
823 	struct spdk_bdev *bdev = target->bdev;
824 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
825 
826 	/* Data size = block size aligned 128K + 1 block */
827 	data_length = 128 * 1024;
828 	data_length -= data_length % block_size;
829 	data_length += block_size;
830 	iov_len = data_length;
831 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
832 	offset = block_size * 2;
833 	pattern = 0xA3;
834 	/* Params are valid, hence the expected return value
835 	 * of write and read for all blockdevs is 0. */
836 	expected_rc = 0;
837 
838 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
839 }
840 
841 static void
842 blockdev_writev_readv_size_gt_128k_two_iov(void)
843 {
844 	uint32_t data_length, iov_len;
845 	uint64_t offset;
846 	int pattern;
847 	int expected_rc;
848 	struct io_target *target = g_current_io_target;
849 	struct spdk_bdev *bdev = target->bdev;
850 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
851 
852 	/* Data size = block size aligned 128K + 1 block */
853 	data_length = 128 * 1024;
854 	data_length -= data_length % block_size;
855 	iov_len = data_length;
856 	data_length += block_size;
857 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
858 	offset = block_size * 2;
859 	pattern = 0xA3;
860 	/* Params are valid, hence the expected return value
861 	 * of write and read for all blockdevs is 0. */
862 	expected_rc = 0;
863 
864 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
865 }
866 
867 static void
868 blockdev_write_read_invalid_size(void)
869 {
870 	uint32_t data_length;
871 	uint64_t offset;
872 	int pattern;
873 	int expected_rc;
874 	struct io_target *target = g_current_io_target;
875 	struct spdk_bdev *bdev = target->bdev;
876 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
877 
878 	/* Data size is not a multiple of the block size */
879 	data_length = block_size - 1;
880 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
881 	offset = block_size * 2;
882 	pattern = 0xA3;
883 	/* Params are invalid, hence the expected return value
884 	 * of write and read for all blockdevs is < 0 */
885 	expected_rc = -1;
886 
887 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
888 }
889 
890 static void
891 blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
892 {
893 	uint32_t data_length;
894 	uint64_t offset;
895 	int pattern;
896 	int expected_rc;
897 	struct io_target *target = g_current_io_target;
898 	struct spdk_bdev *bdev = target->bdev;
899 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
900 
901 	data_length = block_size;
902 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
903 	/* The start offset has been set to a marginal value
904 	 * such that offset + nbytes == Total size of
905 	 * blockdev. */
906 	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);
907 	pattern = 0xA3;
908 	/* Params are valid, hence the expected return value
909 	 * of write and read for all blockdevs is 0. */
910 	expected_rc = 0;
911 
912 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
913 }
914 
915 static void
916 blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
917 {
918 	uint32_t data_length;
919 	uint64_t offset;
920 	int pattern;
921 	int expected_rc;
922 	struct io_target *target = g_current_io_target;
923 	struct spdk_bdev *bdev = target->bdev;
924 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
925 
926 	/* Tests the overflow condition of the blockdevs. */
927 	data_length = block_size * 2;
928 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
929 	pattern = 0xA3;
930 
931 	/* The start offset has been set to a valid value
932 	 * but offset + nbytes is greater than the Total size
933 	 * of the blockdev. The test should fail. */
934 	offset = (spdk_bdev_get_num_blocks(bdev) - 1) * block_size;
935 	/* Params are invalid, hence the expected return value
936 	 * of write and read for all blockdevs is < 0 */
937 	expected_rc = -1;
938 
939 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
940 }
941 
942 static void
943 blockdev_write_read_max_offset(void)
944 {
945 	int	data_length;
946 	uint64_t offset;
947 	int pattern;
948 	int expected_rc;
949 	struct io_target *target = g_current_io_target;
950 	struct spdk_bdev *bdev = target->bdev;
951 
952 	data_length = spdk_bdev_get_block_size(bdev);
953 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
954 	/* The start offset has been set to UINT64_MAX such that
955 	 * adding nbytes wraps around and points to an invalid address. */
956 	offset = UINT64_MAX;
957 	pattern = 0xA3;
958 	/* Params are invalid, hence the expected return value
959 	 * of write and read for all blockdevs is < 0 */
960 	expected_rc = -1;
961 
962 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
963 }
964 
965 static void
966 blockdev_overlapped_write_read_2blocks(void)
967 {
968 	int	data_length;
969 	uint64_t offset;
970 	int pattern;
971 	int expected_rc;
972 	struct io_target *target = g_current_io_target;
973 	struct spdk_bdev *bdev = target->bdev;
974 
975 	/* Data size = 2 blocks */
976 	data_length = spdk_bdev_get_block_size(bdev) * 2;
977 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
978 	offset = 0;
979 	pattern = 0xA3;
980 	/* Params are valid, hence the expected return value
981 	 * of write and read for all blockdevs is 0. */
982 	expected_rc = 0;
983 	/* Assert the write by comparing it with values read
984 	 * from the same offset for each blockdev */
985 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
986 
987 	/* Overwrite the pattern 0xbb of size 2*block size on an address offset
988 	 * overlapping with the address written above and assert the new value in
989 	 * the overlapped address range */
990 	/* Populate 2*block size with value 0xBB */
991 	pattern = 0xBB;
992 	/* Offset = 1 block; Overlap offset addresses and write value 0xbb */
993 	offset = spdk_bdev_get_block_size(bdev);
994 	/* Assert the write by comparing it with values read
995 	 * from the overlapped offset for each blockdev */
996 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
997 }
998 
999 static void
1000 __blockdev_reset(void *arg)
1001 {
1002 	struct bdevio_request *req = arg;
1003 	struct io_target *target = req->target;
1004 	int rc;
1005 
1006 	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
1007 	if (rc < 0) {
1008 		g_completion_success = false;
1009 		wake_ut_thread();
1010 	}
1011 }
1012 
/*
 * Issue a reset to the current target.  The reset must succeed exactly when
 * the bdev reports SPDK_BDEV_IO_TYPE_RESET as supported.
 */
static void
blockdev_test_reset(void)
{
	struct bdevio_request req;
	struct io_target *target;
	bool reset_supported;

	target = g_current_io_target;
	/* __blockdev_reset only reads req.target; other fields stay unset. */
	req.target = target;

	reset_supported = spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_RESET);
	g_completion_success = false;

	execute_spdk_function(__blockdev_reset, &req);

	CU_ASSERT_EQUAL(g_completion_success, reset_supported);
}
1030 
/* Context for an NVMe passthrough request plus its completion status. */
struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;	/* raw NVMe command to submit */
	void *buf;			/* data buffer, len bytes */
	uint32_t len;
	struct io_target *target;
	int sct;			/* status code type from the completion */
	int sc;				/* status code from the completion */
	uint32_t cdw0;			/* completion dword 0 */
};
1040 
/* Passthrough completion: capture cdw0/sct/sc into the request, free the
 * bdev_io, and wake the UT thread. */
static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}
1050 
/*
 * Runs on the io thread: submit the NVMe I/O passthrough command in pt_req.
 * On submission failure the caller-preset sct/sc sentinels are left
 * untouched; the UT thread is woken either here or by nvme_pt_test_complete.
 */
static void
__blockdev_nvme_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
					&pt_req->cmd, pt_req->buf, pt_req->len,
					nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}
1065 
1066 static void
1067 blockdev_test_nvme_passthru_rw(void)
1068 {
1069 	struct bdevio_passthrough_request pt_req;
1070 	void *write_buf, *read_buf;
1071 	struct io_target *target;
1072 
1073 	target = g_current_io_target;
1074 
1075 	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
1076 		return;
1077 	}
1078 
1079 	memset(&pt_req, 0, sizeof(pt_req));
1080 	pt_req.target = target;
1081 	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
1082 	pt_req.cmd.nsid = 1;
1083 	*(uint64_t *)&pt_req.cmd.cdw10 = 4;
1084 	pt_req.cmd.cdw12 = 0;
1085 
1086 	pt_req.len = spdk_bdev_get_block_size(target->bdev);
1087 	write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1088 	memset(write_buf, 0xA5, pt_req.len);
1089 	pt_req.buf = write_buf;
1090 
1091 	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
1092 	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
1093 	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
1094 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1095 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
1096 
1097 	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
1098 	read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1099 	pt_req.buf = read_buf;
1100 
1101 	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
1102 	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
1103 	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
1104 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1105 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
1106 
1107 	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
1108 	spdk_free(read_buf);
1109 	spdk_free(write_buf);
1110 }
1111 
1112 static void
1113 blockdev_test_nvme_passthru_vendor_specific(void)
1114 {
1115 	struct bdevio_passthrough_request pt_req;
1116 	struct io_target *target;
1117 
1118 	target = g_current_io_target;
1119 
1120 	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
1121 		return;
1122 	}
1123 
1124 	memset(&pt_req, 0, sizeof(pt_req));
1125 	pt_req.target = target;
1126 	pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
1127 	pt_req.cmd.nsid = 1;
1128 
1129 	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
1130 	pt_req.sc = SPDK_NVME_SC_SUCCESS;
1131 	pt_req.cdw0 = 0xbeef;
1132 	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
1133 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1134 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
1135 	CU_ASSERT(pt_req.cdw0 == 0x0);
1136 }
1137 
1138 static void
1139 __blockdev_nvme_admin_passthru(void *arg)
1140 {
1141 	struct bdevio_passthrough_request *pt_req = arg;
1142 	struct io_target *target = pt_req->target;
1143 	int rc;
1144 
1145 	rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
1146 					   &pt_req->cmd, pt_req->buf, pt_req->len,
1147 					   nvme_pt_test_complete, pt_req);
1148 	if (rc) {
1149 		wake_ut_thread();
1150 	}
1151 }
1152 
1153 static void
1154 blockdev_test_nvme_admin_passthru(void)
1155 {
1156 	struct io_target *target;
1157 	struct bdevio_passthrough_request pt_req;
1158 
1159 	target = g_current_io_target;
1160 
1161 	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
1162 		return;
1163 	}
1164 
1165 	memset(&pt_req, 0, sizeof(pt_req));
1166 	pt_req.target = target;
1167 	pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
1168 	pt_req.cmd.nsid = 0;
1169 	*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;
1170 
1171 	pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
1172 	pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1173 
1174 	pt_req.sct = SPDK_NVME_SCT_GENERIC;
1175 	pt_req.sc = SPDK_NVME_SC_SUCCESS;
1176 	execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
1177 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1178 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
1179 }
1180 
/* Copy one block from LBA 0 to LBA 1 via SPDK_BDEV_IO_TYPE_COPY, then read the
 * destination back and verify it matches the written source.  Skipped when the
 * bdev does not support copy. */
static void
blockdev_test_copy(void)
{
	uint32_t data_length;
	uint64_t src_offset, dst_offset;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	int rc;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_COPY)) {
		return;
	}

	/* One block of data; must fit within the fixed test buffer size. */
	data_length = spdk_bdev_get_block_size(bdev);
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	src_offset = 0;
	dst_offset = spdk_bdev_get_block_size(bdev);	/* byte offset of block 1 */

	initialize_buffer(&tx_buf, 0xAA, data_length);	/* source pattern */
	initialize_buffer(&rx_buf, 0, data_length);	/* read-back buffer */

	blockdev_write(target, tx_buf, src_offset, data_length, data_length);
	CU_ASSERT_EQUAL(g_completion_success, true);

	blockdev_copy(target, dst_offset, src_offset, data_length);
	CU_ASSERT_EQUAL(g_completion_success, true);

	blockdev_read(target, rx_buf, dst_offset, data_length, data_length);
	CU_ASSERT_EQUAL(g_completion_success, true);

	rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
	CU_ASSERT_EQUAL(rc, 0);
	/* NOTE(review): tx_buf/rx_buf do not appear to be freed here — confirm
	 * whether initialize_buffer()'s allocation is released elsewhere. */
}
1216 
1217 static void
1218 __stop_init_thread(void *arg)
1219 {
1220 	unsigned num_failures = g_num_failures;
1221 	struct spdk_jsonrpc_request *request = arg;
1222 
1223 	g_num_failures = 0;
1224 
1225 	bdevio_cleanup_targets();
1226 	if (g_wait_for_tests && !g_shutdown) {
1227 		/* Do not stop the app yet, wait for another RPC */
1228 		rpc_perform_tests_cb(num_failures, request);
1229 		return;
1230 	}
1231 	assert(spdk_get_thread() == g_thread_init);
1232 	assert(spdk_get_thread() == spdk_thread_get_app_thread());
1233 	execute_spdk_function(__exit_io_thread, NULL);
1234 	spdk_app_stop(num_failures);
1235 }
1236 
/* Record the failure count and hand control to the init (app) thread, which
 * performs the actual cleanup and shutdown in __stop_init_thread(). */
static void
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	g_num_failures = num_failures;

	spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
}
1244 
1245 static int
1246 suite_init(void)
1247 {
1248 	if (g_current_io_target == NULL) {
1249 		g_current_io_target = g_io_targets;
1250 	}
1251 	return 0;
1252 }
1253 
/* CUnit suite teardown: advance to the next io_target so the following suite
 * runs against it (becomes NULL after the last target). */
static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}
1260 
1261 #define SUITE_NAME_MAX 64
1262 
1263 static int
1264 __setup_ut_on_single_target(struct io_target *target)
1265 {
1266 	unsigned rc = 0;
1267 	CU_pSuite suite = NULL;
1268 	char name[SUITE_NAME_MAX];
1269 
1270 	snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
1271 	suite = CU_add_suite(name, suite_init, suite_fini);
1272 	if (suite == NULL) {
1273 		CU_cleanup_registry();
1274 		rc = CU_get_error();
1275 		return -rc;
1276 	}
1277 
1278 	if (
1279 		CU_add_test(suite, "blockdev write read block",
1280 			    blockdev_write_read_block) == NULL
1281 		|| CU_add_test(suite, "blockdev write zeroes read block",
1282 			       blockdev_write_zeroes_read_block) == NULL
1283 		|| CU_add_test(suite, "blockdev write zeroes read no split",
1284 			       blockdev_write_zeroes_read_no_split) == NULL
1285 		|| CU_add_test(suite, "blockdev write zeroes read split",
1286 			       blockdev_write_zeroes_read_split) == NULL
1287 		|| CU_add_test(suite, "blockdev write zeroes read split partial",
1288 			       blockdev_write_zeroes_read_split_partial) == NULL
1289 		|| CU_add_test(suite, "blockdev reset",
1290 			       blockdev_test_reset) == NULL
1291 		|| CU_add_test(suite, "blockdev write read 8 blocks",
1292 			       blockdev_write_read_8blocks) == NULL
1293 		|| CU_add_test(suite, "blockdev write read size > 128k",
1294 			       blockdev_write_read_size_gt_128k) == NULL
1295 		|| CU_add_test(suite, "blockdev write read invalid size",
1296 			       blockdev_write_read_invalid_size) == NULL
1297 		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
1298 			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
1299 		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
1300 			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
1301 		|| CU_add_test(suite, "blockdev write read max offset",
1302 			       blockdev_write_read_max_offset) == NULL
1303 		|| CU_add_test(suite, "blockdev write read 2 blocks on overlapped address offset",
1304 			       blockdev_overlapped_write_read_2blocks) == NULL
1305 		|| CU_add_test(suite, "blockdev writev readv 8 blocks",
1306 			       blockdev_writev_readv_8blocks) == NULL
1307 		|| CU_add_test(suite, "blockdev writev readv 30 x 1block",
1308 			       blockdev_writev_readv_30x1block) == NULL
1309 		|| CU_add_test(suite, "blockdev writev readv block",
1310 			       blockdev_writev_readv_block) == NULL
1311 		|| CU_add_test(suite, "blockdev writev readv size > 128k",
1312 			       blockdev_writev_readv_size_gt_128k) == NULL
1313 		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
1314 			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
1315 		|| CU_add_test(suite, "blockdev comparev and writev",
1316 			       blockdev_comparev_and_writev) == NULL
1317 		|| CU_add_test(suite, "blockdev nvme passthru rw",
1318 			       blockdev_test_nvme_passthru_rw) == NULL
1319 		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
1320 			       blockdev_test_nvme_passthru_vendor_specific) == NULL
1321 		|| CU_add_test(suite, "blockdev nvme admin passthru",
1322 			       blockdev_test_nvme_admin_passthru) == NULL
1323 		|| CU_add_test(suite, "blockdev copy",
1324 			       blockdev_test_copy) == NULL
1325 	) {
1326 		CU_cleanup_registry();
1327 		rc = CU_get_error();
1328 		return -rc;
1329 	}
1330 	return 0;
1331 }
1332 
1333 static void
1334 __run_ut_thread(void *arg)
1335 {
1336 	struct spdk_jsonrpc_request *request = arg;
1337 	int rc = 0;
1338 	struct io_target *target;
1339 
1340 	if (CU_initialize_registry() != CUE_SUCCESS) {
1341 		/* CUnit error, probably won't recover */
1342 		rc = CU_get_error();
1343 		rc = -rc;
1344 		goto ret;
1345 	}
1346 
1347 	target = g_io_targets;
1348 	while (target != NULL) {
1349 		rc = __setup_ut_on_single_target(target);
1350 		if (rc < 0) {
1351 			/* CUnit error, probably won't recover */
1352 			rc = -rc;
1353 			goto ret;
1354 		}
1355 		target = target->next;
1356 	}
1357 	CU_basic_set_mode(CU_BRM_VERBOSE);
1358 	CU_basic_run_tests();
1359 	rc = CU_get_number_of_failures();
1360 	CU_cleanup_registry();
1361 
1362 ret:
1363 	stop_init_thread(rc, request);
1364 	assert(spdk_get_thread() == g_thread_ut);
1365 	spdk_thread_exit(g_thread_ut);
1366 }
1367 
1368 static void
1369 __construct_targets(void *arg)
1370 {
1371 	if (bdevio_construct_targets() < 0) {
1372 		spdk_app_stop(-1);
1373 		return;
1374 	}
1375 
1376 	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
1377 }
1378 
1379 static void
1380 test_main(void *arg1)
1381 {
1382 	struct spdk_cpuset tmpmask = {};
1383 	uint32_t i;
1384 
1385 	pthread_mutex_init(&g_test_mutex, NULL);
1386 	pthread_cond_init(&g_test_cond, NULL);
1387 
1388 	/* This test runs specifically on at least three cores.
1389 	 * g_thread_init is the app_thread on main core from event framework.
1390 	 * Next two are only for the tests and should always be on separate CPU cores. */
1391 	if (spdk_env_get_core_count() < 3) {
1392 		spdk_app_stop(-1);
1393 		return;
1394 	}
1395 
1396 	SPDK_ENV_FOREACH_CORE(i) {
1397 		if (i == spdk_env_get_current_core()) {
1398 			g_thread_init = spdk_get_thread();
1399 			continue;
1400 		}
1401 		spdk_cpuset_zero(&tmpmask);
1402 		spdk_cpuset_set_cpu(&tmpmask, i, true);
1403 		if (g_thread_ut == NULL) {
1404 			g_thread_ut = spdk_thread_create("ut_thread", &tmpmask);
1405 		} else if (g_thread_io == NULL) {
1406 			g_thread_io = spdk_thread_create("io_thread", &tmpmask);
1407 		}
1408 
1409 	}
1410 
1411 	if (g_wait_for_tests) {
1412 		/* Do not perform any tests until RPC is received */
1413 		return;
1414 	}
1415 
1416 	spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
1417 }
1418 
/* Print the app-specific command-line options (appended to the common SPDK
 * app usage output). */
static void
bdevio_usage(void)
{
	printf(" -w                        start bdevio app and wait for RPC to start the tests\n");
}
1424 
1425 static int
1426 bdevio_parse_arg(int ch, char *arg)
1427 {
1428 	switch (ch) {
1429 	case 'w':
1430 		g_wait_for_tests =  true;
1431 		break;
1432 	default:
1433 		return -EINVAL;
1434 	}
1435 	return 0;
1436 }
1437 
/* Parameters of the "perform_tests" RPC. */
struct rpc_perform_tests {
	char *name;	/* heap-allocated bdev name, or NULL to test all bdevs */
};
1441 
1442 static void
1443 free_rpc_perform_tests(struct rpc_perform_tests *r)
1444 {
1445 	free(r->name);
1446 }
1447 
/* JSON decoder table for "perform_tests"; "name" is optional (last field true). */
static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};
1451 
1452 static void
1453 rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
1454 {
1455 	struct spdk_json_write_ctx *w;
1456 
1457 	if (num_failures == 0) {
1458 		w = spdk_jsonrpc_begin_result(request);
1459 		spdk_json_write_uint32(w, num_failures);
1460 		spdk_jsonrpc_end_result(request, w);
1461 	} else {
1462 		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
1463 						     "%d test cases failed", num_failures);
1464 	}
1465 }
1466 
/* RPC handler for "perform_tests": construct the target list (a single named
 * bdev when "name" is given, otherwise all bdevs) and start the CUnit run on
 * the UT thread.  The JSON-RPC response is sent later, once the tests finish. */
static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	struct spdk_bdev *bdev;
	int rc;

	/* params may legitimately be absent; only decode when present. */
	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		/* Run the tests against one named bdev only. */
		bdev = spdk_bdev_get_by_name(req.name);
		if (bdev == NULL) {
			SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Bdev '%s' does not exist: %s",
							     req.name, spdk_strerror(ENODEV));
			goto invalid;
		}
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     spdk_bdev_get_name(bdev), spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		/* No name given: test every registered bdev. */
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	/* Tests run asynchronously; the response is sent from
	 * rpc_perform_tests_cb() when they complete. */
	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);

	return;

invalid:
	/* An error response has already been sent; just free decoded params. */
	free_rpc_perform_tests(&req);
}
SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)
1519 
/* App shutdown callback (signal-triggered): flag shutdown so that
 * __stop_init_thread() stops the app even in -w (wait-for-RPC) mode, then
 * trigger the teardown on the init thread. */
static void
spdk_bdevio_shutdown_cb(void)
{
	g_shutdown = true;
	spdk_thread_send_msg(g_thread_init, __stop_init_thread, NULL);
}
1526 
1527 int
1528 main(int argc, char **argv)
1529 {
1530 	int			rc;
1531 	struct spdk_app_opts	opts = {};
1532 
1533 	spdk_app_opts_init(&opts, sizeof(opts));
1534 	opts.name = "bdevio";
1535 	opts.reactor_mask = "0x7";
1536 	opts.shutdown_cb = spdk_bdevio_shutdown_cb;
1537 
1538 	if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
1539 				      bdevio_parse_arg, bdevio_usage)) !=
1540 	    SPDK_APP_PARSE_ARGS_SUCCESS) {
1541 		return rc;
1542 	}
1543 
1544 	rc = spdk_app_start(&opts, test_main, NULL);
1545 	spdk_app_fini();
1546 
1547 	return rc;
1548 }
1549