xref: /spdk/test/bdev/bdevio/bdevio.c (revision 784b9d48746955f210926648a0131f84f58de76f)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2016 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/bdev.h"
10 #include "spdk/accel.h"
11 #include "spdk/env.h"
12 #include "spdk/log.h"
13 #include "spdk/thread.h"
14 #include "spdk/event.h"
15 #include "spdk/rpc.h"
16 #include "spdk/util.h"
17 #include "spdk/string.h"
18 
19 #include "bdev_internal.h"
20 #include "CUnit/Basic.h"
21 
22 #define BUFFER_IOVS		1024
23 #define BUFFER_SIZE		260 * 1024
24 #define BDEV_TASK_ARRAY_SIZE	2048
25 
26 pthread_mutex_t g_test_mutex;
27 pthread_cond_t g_test_cond;
28 
29 static struct spdk_thread *g_thread_init;
30 static struct spdk_thread *g_thread_ut;
31 static struct spdk_thread *g_thread_io;
32 static bool g_wait_for_tests = false;
33 static int g_num_failures = 0;
34 static bool g_shutdown = false;
35 
/* One bdev under test: its open descriptor, the I/O channel used to submit
 * I/O (obtained on the dedicated I/O thread), and the link to the next
 * element of the global g_io_targets singly-linked list. */
struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;	/* acquired via __get_io_channel on g_thread_io */
	struct io_target	*next;
};
42 
/* Per-I/O request context handed from the UT thread to the I/O thread. */
struct bdevio_request {
	char *buf;			/* primary data buffer */
	char *fused_buf;		/* write half of a fused compare-and-write */
	int data_len;			/* transfer length in bytes */
	uint64_t offset;		/* byte offset (destination offset for copy) */
	struct iovec iov[BUFFER_IOVS];	/* SGL built from buf by sgl_chop_buffer() */
	int iovcnt;			/* 0 => submit the contiguous buf instead of the SGL */
	struct iovec fused_iov[BUFFER_IOVS];	/* SGL built from fused_buf */
	int fused_iovcnt;
	struct io_target *target;
	uint64_t src_offset;		/* source byte offset, used only by copy */
};
55 
56 struct io_target *g_io_targets = NULL;
57 struct io_target *g_current_io_target = NULL;
58 static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);
59 
/*
 * Run fn(arg) on the dedicated I/O thread and block the calling (UT) thread
 * until fn signals completion by calling wake_ut_thread().  The mutex is held
 * across send+wait so the signal cannot be missed between the message being
 * processed and the wait starting.
 *
 * NOTE(review): there is no predicate loop around pthread_cond_wait(), so this
 * relies on the absence of spurious wakeups and on exactly one signal per
 * call — confirm against POSIX condvar semantics before reusing this pattern.
 */
static void
execute_spdk_function(spdk_msg_fn fn, void *arg)
{
	pthread_mutex_lock(&g_test_mutex);
	spdk_thread_send_msg(g_thread_io, fn, arg);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}
68 
/* Called on the I/O thread when an operation finishes: releases the UT
 * thread blocked in execute_spdk_function(). */
static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}
76 
/* Message handler that exits the I/O thread from its own context (required
 * by spdk_thread_exit) and then wakes the waiting UT thread. */
static void
__exit_io_thread(void *arg)
{
	assert(spdk_get_thread() == g_thread_io);
	spdk_thread_exit(g_thread_io);
	wake_ut_thread();
}
84 
/* Runs on the I/O thread: acquire the target's I/O channel so that all
 * subsequent I/O on this channel is submitted from that same thread. */
static void
__get_io_channel(void *arg)
{
	struct io_target *target = arg;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}
94 
/* Event callback required by spdk_bdev_open_ext(); this test ignores all
 * bdev events (e.g. removal), so the body is intentionally empty. */
static void
bdevio_construct_target_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				void *event_ctx)
{
}
100 
/*
 * Open one bdev read/write, fetch its I/O channel on the I/O thread, and
 * push the resulting io_target onto the global g_io_targets list.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * spdk_bdev_open_ext() (target is freed on that path, so no leak).
 */
static int
bdevio_construct_target(struct spdk_bdev *bdev)
{
	struct io_target *target;
	int rc;
	uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	target = malloc(sizeof(struct io_target));
	if (target == NULL) {
		return -ENOMEM;
	}

	rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), true, bdevio_construct_target_open_cb, NULL,
				&target->bdev_desc);
	if (rc != 0) {
		free(target);
		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
		return rc;
	}

	printf("  %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
	       spdk_bdev_get_name(bdev),
	       num_blocks, block_size,
	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

	target->bdev = bdev;
	target->next = g_io_targets;
	/* Channel must be acquired on the I/O thread; blocks until done. */
	execute_spdk_function(__get_io_channel, target);
	g_io_targets = target;

	return 0;
}
134 
135 static int
136 bdevio_construct_targets(void)
137 {
138 	struct spdk_bdev *bdev;
139 	int rc;
140 
141 	printf("I/O targets:\n");
142 
143 	bdev = spdk_bdev_first_leaf();
144 	while (bdev != NULL) {
145 		rc = bdevio_construct_target(bdev);
146 		if (rc < 0) {
147 			SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
148 			return rc;
149 		}
150 		bdev = spdk_bdev_next_leaf(bdev);
151 	}
152 
153 	if (g_io_targets == NULL) {
154 		SPDK_ERRLOG("No bdevs to perform tests on\n");
155 		return -1;
156 	}
157 
158 	return 0;
159 }
160 
/* Runs on the I/O thread: release the channel that was acquired there. */
static void
__put_io_channel(void *arg)
{
	struct io_target *target = arg;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}
169 
170 static void
171 bdevio_cleanup_targets(void)
172 {
173 	struct io_target *target;
174 
175 	target = g_io_targets;
176 	while (target != NULL) {
177 		execute_spdk_function(__put_io_channel, target);
178 		spdk_bdev_close(target->bdev_desc);
179 		g_io_targets = target->next;
180 		free(target);
181 		target = g_io_targets;
182 	}
183 }
184 
185 static bool g_completion_success;
186 
187 static void
188 initialize_buffer(char **buf, int pattern, int size, uint32_t block_size)
189 {
190 	CU_ASSERT(block_size != 0);
191 
192 	*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
193 	memset(*buf, pattern, size);
194 
195 	if (pattern) {
196 		for (int offset = 0, block = 0; offset < size; offset += block_size, block++) {
197 			*(*buf + offset) = block;
198 		}
199 	}
200 }
201 
/* Generic I/O completion callback: record success/failure in the global
 * flag, free the bdev_io, and unblock the UT thread. */
static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}
209 
210 static uint64_t
211 bdev_bytes_to_blocks(struct spdk_bdev *bdev, uint64_t bytes)
212 {
213 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
214 
215 	CU_ASSERT(bytes % block_size == 0);
216 	return bytes / block_size;
217 }
218 
/*
 * I/O-thread half of a write: submit a vectored write when the request was
 * chopped into an SGL (iovcnt != 0), otherwise a contiguous write.  If
 * submission itself fails, quick_test_complete will never run, so record the
 * failure and wake the UT thread here.
 */
static void
__blockdev_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
239 
/* I/O-thread half of a write-zeroes request; on submission failure record
 * the failure and wake the UT thread (no completion callback will fire). */
static void
__blockdev_write_zeroes(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
254 
/*
 * I/O-thread half of a fused compare-and-write: compare iov against the
 * media and, only if equal, write fused_iov at the same LBA range.  Byte
 * offsets/lengths are converted to blocks here.  On submission failure,
 * record the failure and wake the UT thread.
 */
static void
__blockdev_compare_and_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	struct spdk_bdev *bdev = target->bdev;
	int rc;

	rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt,
			req->fused_iov, req->fused_iovcnt, bdev_bytes_to_blocks(bdev, req->offset),
			bdev_bytes_to_blocks(bdev, req->data_len), quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
272 
273 static void
274 sgl_chop_buffer(struct bdevio_request *req, int iov_len)
275 {
276 	int data_len = req->data_len;
277 	char *buf = req->buf;
278 
279 	req->iovcnt = 0;
280 	if (!iov_len) {
281 		return;
282 	}
283 
284 	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
285 		if (data_len < iov_len) {
286 			iov_len = data_len;
287 		}
288 
289 		req->iov[req->iovcnt].iov_base = buf;
290 		req->iov[req->iovcnt].iov_len = iov_len;
291 
292 		buf += iov_len;
293 		data_len -= iov_len;
294 	}
295 
296 	CU_ASSERT_EQUAL_FATAL(data_len, 0);
297 }
298 
299 static void
300 sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len)
301 {
302 	int data_len = req->data_len;
303 	char *buf = req->fused_buf;
304 
305 	req->fused_iovcnt = 0;
306 	if (!iov_len) {
307 		return;
308 	}
309 
310 	for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) {
311 		if (data_len < iov_len) {
312 			iov_len = data_len;
313 		}
314 
315 		req->fused_iov[req->fused_iovcnt].iov_base = buf;
316 		req->fused_iov[req->fused_iovcnt].iov_len = iov_len;
317 
318 		buf += iov_len;
319 		data_len -= iov_len;
320 	}
321 
322 	CU_ASSERT_EQUAL_FATAL(data_len, 0);
323 }
324 
325 static void
326 blockdev_write(struct io_target *target, char *tx_buf,
327 	       uint64_t offset, int data_len, int iov_len)
328 {
329 	struct bdevio_request req;
330 
331 	req.target = target;
332 	req.buf = tx_buf;
333 	req.data_len = data_len;
334 	req.offset = offset;
335 	sgl_chop_buffer(&req, iov_len);
336 
337 	g_completion_success = false;
338 
339 	execute_spdk_function(__blockdev_write, &req);
340 }
341 
342 static void
343 _blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf,
344 			    uint64_t offset, int data_len, int iov_len)
345 {
346 	struct bdevio_request req;
347 
348 	req.target = target;
349 	req.buf = cmp_buf;
350 	req.fused_buf = write_buf;
351 	req.data_len = data_len;
352 	req.offset = offset;
353 	sgl_chop_buffer(&req, iov_len);
354 	sgl_chop_fused_buffer(&req, iov_len);
355 
356 	g_completion_success = false;
357 
358 	execute_spdk_function(__blockdev_compare_and_write, &req);
359 }
360 
361 static void
362 blockdev_write_zeroes(struct io_target *target, char *tx_buf,
363 		      uint64_t offset, int data_len)
364 {
365 	struct bdevio_request req;
366 
367 	req.target = target;
368 	req.buf = tx_buf;
369 	req.data_len = data_len;
370 	req.offset = offset;
371 
372 	g_completion_success = false;
373 
374 	execute_spdk_function(__blockdev_write_zeroes, &req);
375 }
376 
/*
 * I/O-thread half of a read: vectored when the request carries an SGL,
 * contiguous otherwise.  On submission failure record the failure and wake
 * the UT thread, since no completion callback will run.
 */
static void
__blockdev_read(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
397 
398 static void
399 blockdev_read(struct io_target *target, char *rx_buf,
400 	      uint64_t offset, int data_len, int iov_len)
401 {
402 	struct bdevio_request req;
403 
404 	req.target = target;
405 	req.buf = rx_buf;
406 	req.data_len = data_len;
407 	req.offset = offset;
408 	req.iovcnt = 0;
409 	sgl_chop_buffer(&req, iov_len);
410 
411 	g_completion_success = false;
412 
413 	execute_spdk_function(__blockdev_read, &req);
414 }
415 
/*
 * I/O-thread half of a copy: copy data_len bytes from src_offset to offset
 * (both converted from bytes to blocks).  On submission failure record the
 * failure and wake the UT thread.
 */
static void
_blockdev_copy(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	struct spdk_bdev *bdev = target->bdev;
	int rc;

	rc = spdk_bdev_copy_blocks(target->bdev_desc, target->ch,
				   bdev_bytes_to_blocks(bdev, req->offset),
				   bdev_bytes_to_blocks(bdev, req->src_offset),
				   bdev_bytes_to_blocks(bdev, req->data_len),
				   quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
435 
436 static void
437 blockdev_copy(struct io_target *target, uint64_t dst_offset, uint64_t src_offset, int data_len)
438 {
439 	struct bdevio_request req;
440 
441 	req.target = target;
442 	req.data_len = data_len;
443 	req.offset = dst_offset;
444 	req.src_offset = src_offset;
445 
446 	g_completion_success = false;
447 
448 	execute_spdk_function(_blockdev_copy, &req);
449 }
450 
/* Compare the read-back buffer with the written buffer; returns 0 when the
 * first data_length bytes match (memcmp semantics otherwise). */
static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int diff;

	diff = memcmp(rx_buf, tx_buf, data_length);
	return diff;
}
456 
/*
 * Core write-then-read-back check.
 *
 * Writes `data_length` bytes of `pattern` at `offset` (or write-zeroes when
 * write_zeroes is set, in which case rx_buf is pre-filled with `pattern` so
 * a successful read must overwrite it with zeroes), reads the same range
 * back, asserts both completions against expected_rc, and finally compares
 * the buffers byte-for-byte.
 *
 * If the bdev has a write unit size > 1 block and the I/O is expected to
 * succeed, the *write* is expanded to cover whole write units: write_offset
 * is rounded down and write_data_len rounded up.  The read still targets the
 * caller's original offset/length, so the data comparison starts at
 * tx_buf + (offset - write_offset).
 */
static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes, uint32_t block_size)
{
	struct io_target *target;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;
	uint64_t write_offset = offset;
	uint32_t write_data_len = data_length;

	target = g_current_io_target;

	if (spdk_bdev_get_write_unit_size(target->bdev) > 1 && expected_rc == 0) {
		uint32_t write_unit_bytes;

		/* Round the write down/up to whole write units. */
		write_unit_bytes = spdk_bdev_get_write_unit_size(target->bdev) *
				   spdk_bdev_get_block_size(target->bdev);
		write_offset -= offset % write_unit_bytes;
		write_data_len += (offset - write_offset);

		if (write_data_len % write_unit_bytes) {
			write_data_len += write_unit_bytes - write_data_len % write_unit_bytes;
		}
	}

	if (!write_zeroes) {
		initialize_buffer(&tx_buf, pattern, write_data_len, block_size);
		initialize_buffer(&rx_buf, 0, data_length, block_size);

		blockdev_write(target, tx_buf, write_offset, write_data_len, iov_len);
	} else {
		/* tx_buf is all zeroes; rx_buf starts as `pattern` so the read
		 * must actually fetch zeroes from the media to match. */
		initialize_buffer(&tx_buf, 0, write_data_len, block_size);
		initialize_buffer(&rx_buf, pattern, data_length, block_size);

		blockdev_write_zeroes(target, tx_buf, write_offset, write_data_len);
	}


	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}
	blockdev_read(target, rx_buf, offset, data_length, iov_len);

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	if (g_completion_success) {
		rc = blockdev_write_read_data_match(rx_buf, tx_buf + (offset - write_offset), data_length);
		/* Assert the write by comparing it with values read
		 * from each blockdev */
		CU_ASSERT_EQUAL(rc, 0);
	}

	spdk_free(rx_buf);
	spdk_free(tx_buf);
}
519 
/*
 * Exercise fused compare-and-write:
 *   1. write 0xAA-pattern data,
 *   2. compare-and-write with matching compare data -> must succeed
 *      (media now holds the 0xBB write buffer),
 *   3. repeat the same compare -> must fail, since the media changed,
 *   4. read back and verify the media matches the write buffer.
 */
static void
blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset)
{
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	char	*tx_buf = NULL;
	char	*write_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;
	const uint32_t block_size = spdk_bdev_get_block_size(bdev);

	initialize_buffer(&tx_buf, 0xAA, data_length, block_size);
	initialize_buffer(&rx_buf, 0, data_length, block_size);
	initialize_buffer(&write_buf, 0xBB, data_length, block_size);

	blockdev_write(target, tx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	/* Second attempt must fail: media no longer matches tx_buf. */
	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);
	rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);

	spdk_free(rx_buf);
	spdk_free(tx_buf);
	spdk_free(write_buf);
}
555 
556 static void
557 blockdev_write_read_block(void)
558 {
559 	uint32_t data_length;
560 	uint64_t offset;
561 	int pattern;
562 	int expected_rc;
563 	struct io_target *target = g_current_io_target;
564 	struct spdk_bdev *bdev = target->bdev;
565 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
566 
567 	/* Data size = 1 block */
568 	data_length = block_size;
569 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
570 	offset = 0;
571 	pattern = 0xA3;
572 	/* Params are valid, hence the expected return value
573 	 * of write and read for all blockdevs is 0. */
574 	expected_rc = 0;
575 
576 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
577 }
578 
579 static void
580 blockdev_write_zeroes_read_block(void)
581 {
582 	uint32_t data_length;
583 	uint64_t offset;
584 	int pattern;
585 	int expected_rc;
586 	struct io_target *target = g_current_io_target;
587 	struct spdk_bdev *bdev = target->bdev;
588 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
589 
590 	/* Data size = 1 block */
591 	data_length = block_size;
592 	offset = 0;
593 	pattern = 0xA3;
594 	/* Params are valid, hence the expected return value
595 	 * of write_zeroes and read for all blockdevs is 0. */
596 	expected_rc = 0;
597 
598 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1, block_size);
599 }
600 
601 /*
602  * This i/o will not have to split at the bdev layer.
603  */
604 static void
605 blockdev_write_zeroes_read_no_split(void)
606 {
607 	uint32_t data_length;
608 	uint64_t offset;
609 	int pattern;
610 	int expected_rc;
611 	struct io_target *target = g_current_io_target;
612 	struct spdk_bdev *bdev = target->bdev;
613 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
614 
615 	/* Data size = block size aligned ZERO_BUFFER_SIZE */
616 	data_length = ZERO_BUFFER_SIZE; /* from bdev_internal.h */
617 	data_length -= ZERO_BUFFER_SIZE % block_size;
618 	offset = 0;
619 	pattern = 0xA3;
620 	/* Params are valid, hence the expected return value
621 	 * of write_zeroes and read for all blockdevs is 0. */
622 	expected_rc = 0;
623 
624 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1, block_size);
625 }
626 
627 /*
628  * This i/o will have to split at the bdev layer if
629  * write-zeroes is not supported by the bdev.
630  */
631 static void
632 blockdev_write_zeroes_read_split(void)
633 {
634 	uint32_t data_length;
635 	uint64_t offset;
636 	int pattern;
637 	int expected_rc;
638 	struct io_target *target = g_current_io_target;
639 	struct spdk_bdev *bdev = target->bdev;
640 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
641 
642 	/* Data size = block size aligned 3 * ZERO_BUFFER_SIZE */
643 	data_length = 3 * ZERO_BUFFER_SIZE; /* from bdev_internal.h */
644 	data_length -= data_length % block_size;
645 	offset = 0;
646 	pattern = 0xA3;
647 	/* Params are valid, hence the expected return value
648 	 * of write_zeroes and read for all blockdevs is 0. */
649 	expected_rc = 0;
650 
651 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1, block_size);
652 }
653 
654 /*
655  * This i/o will have to split at the bdev layer if
656  * write-zeroes is not supported by the bdev. It also
657  * tests a write size that is not an even multiple of
658  * the bdev layer zero buffer size.
659  */
660 static void
661 blockdev_write_zeroes_read_split_partial(void)
662 {
663 	uint32_t data_length;
664 	uint64_t offset;
665 	int pattern;
666 	int expected_rc;
667 	struct io_target *target = g_current_io_target;
668 	struct spdk_bdev *bdev = target->bdev;
669 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
670 
671 	/* Data size = block size aligned 7 * ZERO_BUFFER_SIZE / 2 */
672 	data_length = ZERO_BUFFER_SIZE * 7 / 2;
673 	data_length -= data_length % block_size;
674 	offset = 0;
675 	pattern = 0xA3;
676 	/* Params are valid, hence the expected return value
677 	 * of write_zeroes and read for all blockdevs is 0. */
678 	expected_rc = 0;
679 
680 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1, block_size);
681 }
682 
683 static void
684 blockdev_writev_readv_block(void)
685 {
686 	uint32_t data_length, iov_len;
687 	uint64_t offset;
688 	int pattern;
689 	int expected_rc;
690 	struct io_target *target = g_current_io_target;
691 	struct spdk_bdev *bdev = target->bdev;
692 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
693 
694 	/* Data size = 1 block */
695 	data_length = block_size;
696 	iov_len = data_length;
697 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
698 	offset = 0;
699 	pattern = 0xA3;
700 	/* Params are valid, hence the expected return value
701 	 * of write and read for all blockdevs is 0. */
702 	expected_rc = 0;
703 
704 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0, block_size);
705 }
706 
/* Fused compare-and-write test using one atomic-compare-and-write-unit of
 * data; skipped for bdevs with separate metadata (unsupported). */
static void
blockdev_comparev_and_writev(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	if (spdk_bdev_is_md_separate(bdev)) {
		/* TODO: remove this check once bdev layer properly supports
		 * compare and write for bdevs with separate md.
		 */
		SPDK_ERRLOG("skipping comparev_and_writev on bdev %s since it has\n"
			    "separate metadata which is not supported yet.\n",
			    spdk_bdev_get_name(bdev));
		return;
	}

	/* Data size = acwu size */
	data_length = spdk_bdev_get_block_size(bdev) * spdk_bdev_get_acwu(bdev);
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;

	blockdev_compare_and_write(data_length, iov_len, offset);
}
733 
734 static void
735 blockdev_writev_readv_30x1block(void)
736 {
737 	uint32_t data_length, iov_len;
738 	uint64_t offset;
739 	int pattern;
740 	int expected_rc;
741 	struct io_target *target = g_current_io_target;
742 	struct spdk_bdev *bdev = target->bdev;
743 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
744 
745 	/* Data size = 30 * block size */
746 	data_length = block_size * 30;
747 	iov_len = block_size;
748 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
749 	offset = 0;
750 	pattern = 0xA3;
751 	/* Params are valid, hence the expected return value
752 	 * of write and read for all blockdevs is 0. */
753 	expected_rc = 0;
754 
755 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0, block_size);
756 }
757 
758 static void
759 blockdev_write_read_8blocks(void)
760 {
761 	uint32_t data_length;
762 	uint64_t offset;
763 	int pattern;
764 	int expected_rc;
765 	struct io_target *target = g_current_io_target;
766 	struct spdk_bdev *bdev = target->bdev;
767 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
768 
769 	/* Data size = 8 * block size */
770 	data_length = block_size * 8;
771 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
772 	offset = data_length;
773 	pattern = 0xA3;
774 	/* Params are valid, hence the expected return value
775 	 * of write and read for all blockdevs is 0. */
776 	expected_rc = 0;
777 
778 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
779 }
780 
781 static void
782 blockdev_writev_readv_8blocks(void)
783 {
784 	uint32_t data_length, iov_len;
785 	uint64_t offset;
786 	int pattern;
787 	int expected_rc;
788 	struct io_target *target = g_current_io_target;
789 	struct spdk_bdev *bdev = target->bdev;
790 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
791 
792 
793 	/* Data size = 8 * block size */
794 	data_length = block_size * 8;
795 	iov_len = data_length;
796 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
797 	offset = data_length;
798 	pattern = 0xA3;
799 	/* Params are valid, hence the expected return value
800 	 * of write and read for all blockdevs is 0. */
801 	expected_rc = 0;
802 
803 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0, block_size);
804 }
805 
806 static void
807 blockdev_write_read_size_gt_128k(void)
808 {
809 	uint32_t data_length;
810 	uint64_t offset;
811 	int pattern;
812 	int expected_rc;
813 	struct io_target *target = g_current_io_target;
814 	struct spdk_bdev *bdev = target->bdev;
815 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
816 
817 	/* Data size = block size aligned 128K + 1 block */
818 	data_length = 128 * 1024;
819 	data_length -= data_length % block_size;
820 	data_length += block_size;
821 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
822 	offset = block_size * 2;
823 	pattern = 0xA3;
824 	/* Params are valid, hence the expected return value
825 	 * of write and read for all blockdevs is 0. */
826 	expected_rc = 0;
827 
828 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
829 }
830 
831 static void
832 blockdev_writev_readv_size_gt_128k(void)
833 {
834 	uint32_t data_length, iov_len;
835 	uint64_t offset;
836 	int pattern;
837 	int expected_rc;
838 	struct io_target *target = g_current_io_target;
839 	struct spdk_bdev *bdev = target->bdev;
840 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
841 
842 	/* Data size = block size aligned 128K + 1 block */
843 	data_length = 128 * 1024;
844 	data_length -= data_length % block_size;
845 	data_length += block_size;
846 	iov_len = data_length;
847 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
848 	offset = block_size * 2;
849 	pattern = 0xA3;
850 	/* Params are valid, hence the expected return value
851 	 * of write and read for all blockdevs is 0. */
852 	expected_rc = 0;
853 
854 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0, block_size);
855 }
856 
857 static void
858 blockdev_writev_readv_size_gt_128k_two_iov(void)
859 {
860 	uint32_t data_length, iov_len;
861 	uint64_t offset;
862 	int pattern;
863 	int expected_rc;
864 	struct io_target *target = g_current_io_target;
865 	struct spdk_bdev *bdev = target->bdev;
866 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
867 
868 	/* Data size = block size aligned 128K + 1 block */
869 	data_length = 128 * 1024;
870 	data_length -= data_length % block_size;
871 	iov_len = data_length;
872 	data_length += block_size;
873 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
874 	offset = block_size * 2;
875 	pattern = 0xA3;
876 	/* Params are valid, hence the expected return value
877 	 * of write and read for all blockdevs is 0. */
878 	expected_rc = 0;
879 
880 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0, block_size);
881 }
882 
883 static void
884 blockdev_write_read_invalid_size(void)
885 {
886 	uint32_t data_length;
887 	uint64_t offset;
888 	int pattern;
889 	int expected_rc;
890 	struct io_target *target = g_current_io_target;
891 	struct spdk_bdev *bdev = target->bdev;
892 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
893 
894 	/* Data size is not a multiple of the block size */
895 	data_length = block_size - 1;
896 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
897 	offset = block_size * 2;
898 	pattern = 0xA3;
899 	/* Params are invalid, hence the expected return value
900 	 * of write and read for all blockdevs is < 0 */
901 	expected_rc = -1;
902 
903 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
904 }
905 
906 static void
907 blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
908 {
909 	uint32_t data_length;
910 	uint64_t offset;
911 	int pattern;
912 	int expected_rc;
913 	struct io_target *target = g_current_io_target;
914 	struct spdk_bdev *bdev = target->bdev;
915 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
916 
917 	data_length = block_size;
918 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
919 	/* The start offset has been set to a marginal value
920 	 * such that offset + nbytes == Total size of
921 	 * blockdev. */
922 	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);
923 	pattern = 0xA3;
924 	/* Params are valid, hence the expected return value
925 	 * of write and read for all blockdevs is 0. */
926 	expected_rc = 0;
927 
928 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
929 }
930 
931 static void
932 blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
933 {
934 	uint32_t data_length;
935 	uint64_t offset;
936 	int pattern;
937 	int expected_rc;
938 	struct io_target *target = g_current_io_target;
939 	struct spdk_bdev *bdev = target->bdev;
940 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
941 
942 	/* Tests the overflow condition of the blockdevs. */
943 	data_length = block_size * 2;
944 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
945 	pattern = 0xA3;
946 
947 	/* The start offset has been set to a valid value
948 	 * but offset + nbytes is greater than the Total size
949 	 * of the blockdev. The test should fail. */
950 	offset = (spdk_bdev_get_num_blocks(bdev) - 1) * block_size;
951 	/* Params are invalid, hence the expected return value
952 	 * of write and read for all blockdevs is < 0 */
953 	expected_rc = -1;
954 
955 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
956 }
957 
958 static void
959 blockdev_write_read_max_offset(void)
960 {
961 	int	data_length;
962 	uint64_t offset;
963 	int pattern;
964 	int expected_rc;
965 	struct io_target *target = g_current_io_target;
966 	struct spdk_bdev *bdev = target->bdev;
967 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
968 
969 	data_length = block_size;
970 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
971 	/* The start offset has been set to UINT64_MAX such that
972 	 * adding nbytes wraps around and points to an invalid address. */
973 	offset = UINT64_MAX;
974 	pattern = 0xA3;
975 	/* Params are invalid, hence the expected return value
976 	 * of write and read for all blockdevs is < 0 */
977 	expected_rc = -1;
978 
979 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
980 }
981 
982 static void
983 blockdev_overlapped_write_read_2blocks(void)
984 {
985 	int	data_length;
986 	uint64_t offset;
987 	int pattern;
988 	int expected_rc;
989 	struct io_target *target = g_current_io_target;
990 	struct spdk_bdev *bdev = target->bdev;
991 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
992 
993 	/* Data size = 2 blocks */
994 	data_length = block_size * 2;
995 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
996 	offset = 0;
997 	pattern = 0xA3;
998 	/* Params are valid, hence the expected return value
999 	 * of write and read for all blockdevs is 0. */
1000 	expected_rc = 0;
1001 	/* Assert the write by comparing it with values read
1002 	 * from the same offset for each blockdev */
1003 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
1004 
1005 	/* Overwrite the pattern 0xbb of size 2*block size on an address offset
1006 	 * overlapping with the address written above and assert the new value in
1007 	 * the overlapped address range */
1008 	/* Populate 2*block size with value 0xBB */
1009 	pattern = 0xBB;
1010 	/* Offset = 1 block; Overlap offset addresses and write value 0xbb */
1011 	offset = spdk_bdev_get_block_size(bdev);
1012 	/* Assert the write by comparing it with values read
1013 	 * from the overlapped offset for each blockdev */
1014 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
1015 }
1016 
/* I/O-thread half of a reset; on submission failure record the failure and
 * wake the UT thread (no completion callback will fire). */
static void
__blockdev_reset(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
1030 
1031 static void
1032 blockdev_test_reset(void)
1033 {
1034 	struct bdevio_request req;
1035 	struct io_target *target;
1036 	bool reset_supported;
1037 
1038 	target = g_current_io_target;
1039 	req.target = target;
1040 
1041 	reset_supported = spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_RESET);
1042 	g_completion_success = false;
1043 
1044 	execute_spdk_function(__blockdev_reset, &req);
1045 
1046 	CU_ASSERT_EQUAL(g_completion_success, reset_supported);
1047 }
1048 
/* Context for one NVMe passthrough command issued to a target. */
struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;	/* raw NVMe command to submit */
	void *buf;			/* data buffer for the transfer */
	uint32_t len;			/* buffer length in bytes */
	struct io_target *target;
	int sct;			/* completion status code type (out) */
	int sc;				/* completion status code (out) */
	uint32_t cdw0;			/* completion dword 0 (out) */
};
1058 
/* Passthrough completion: capture the NVMe status (cdw0/sct/sc) into the
 * request, free the bdev_io, and unblock the UT thread. */
static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}
1068 
1069 static void
1070 __blockdev_nvme_passthru(void *arg)
1071 {
1072 	struct bdevio_passthrough_request *pt_req = arg;
1073 	struct io_target *target = pt_req->target;
1074 	int rc;
1075 
1076 	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
1077 					&pt_req->cmd, pt_req->buf, pt_req->len,
1078 					nvme_pt_test_complete, pt_req);
1079 	if (rc) {
1080 		wake_ut_thread();
1081 	}
1082 }
1083 
1084 static void
1085 blockdev_test_nvme_passthru_rw(void)
1086 {
1087 	struct bdevio_passthrough_request pt_req;
1088 	void *write_buf, *read_buf;
1089 	struct io_target *target;
1090 
1091 	target = g_current_io_target;
1092 
1093 	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
1094 		return;
1095 	}
1096 
1097 	memset(&pt_req, 0, sizeof(pt_req));
1098 	pt_req.target = target;
1099 	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
1100 	pt_req.cmd.nsid = 1;
1101 	*(uint64_t *)&pt_req.cmd.cdw10 = 4;
1102 	pt_req.cmd.cdw12 = 0;
1103 
1104 	pt_req.len = spdk_bdev_get_block_size(target->bdev);
1105 	write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1106 	memset(write_buf, 0xA5, pt_req.len);
1107 	pt_req.buf = write_buf;
1108 
1109 	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
1110 	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
1111 	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
1112 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1113 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
1114 
1115 	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
1116 	read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1117 	pt_req.buf = read_buf;
1118 
1119 	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
1120 	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
1121 	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
1122 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1123 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
1124 
1125 	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
1126 	spdk_free(read_buf);
1127 	spdk_free(write_buf);
1128 }
1129 
1130 static void
1131 blockdev_test_nvme_passthru_vendor_specific(void)
1132 {
1133 	struct bdevio_passthrough_request pt_req;
1134 	struct io_target *target;
1135 
1136 	target = g_current_io_target;
1137 
1138 	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
1139 		return;
1140 	}
1141 
1142 	memset(&pt_req, 0, sizeof(pt_req));
1143 	pt_req.target = target;
1144 	pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
1145 	pt_req.cmd.nsid = 1;
1146 
1147 	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
1148 	pt_req.sc = SPDK_NVME_SC_SUCCESS;
1149 	pt_req.cdw0 = 0xbeef;
1150 	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
1151 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1152 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
1153 	CU_ASSERT(pt_req.cdw0 == 0x0);
1154 }
1155 
1156 static void
1157 __blockdev_nvme_admin_passthru(void *arg)
1158 {
1159 	struct bdevio_passthrough_request *pt_req = arg;
1160 	struct io_target *target = pt_req->target;
1161 	int rc;
1162 
1163 	rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
1164 					   &pt_req->cmd, pt_req->buf, pt_req->len,
1165 					   nvme_pt_test_complete, pt_req);
1166 	if (rc) {
1167 		wake_ut_thread();
1168 	}
1169 }
1170 
1171 static void
1172 blockdev_test_nvme_admin_passthru(void)
1173 {
1174 	struct io_target *target;
1175 	struct bdevio_passthrough_request pt_req;
1176 
1177 	target = g_current_io_target;
1178 
1179 	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
1180 		return;
1181 	}
1182 
1183 	memset(&pt_req, 0, sizeof(pt_req));
1184 	pt_req.target = target;
1185 	pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
1186 	pt_req.cmd.nsid = 0;
1187 	*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;
1188 
1189 	pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
1190 	pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1191 
1192 	pt_req.sct = SPDK_NVME_SCT_GENERIC;
1193 	pt_req.sc = SPDK_NVME_SC_SUCCESS;
1194 	execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
1195 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1196 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
1197 }
1198 
1199 static void
1200 blockdev_test_copy(void)
1201 {
1202 	uint32_t data_length;
1203 	uint64_t src_offset, dst_offset;
1204 	struct io_target *target = g_current_io_target;
1205 	struct spdk_bdev *bdev = target->bdev;
1206 	char *tx_buf = NULL;
1207 	char *rx_buf = NULL;
1208 	int rc;
1209 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
1210 
1211 	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_COPY)) {
1212 		return;
1213 	}
1214 
1215 	data_length = block_size;
1216 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
1217 	src_offset = 0;
1218 	dst_offset = block_size;
1219 
1220 	initialize_buffer(&tx_buf, 0xAA, data_length, block_size);
1221 	initialize_buffer(&rx_buf, 0, data_length, block_size);
1222 
1223 	blockdev_write(target, tx_buf, src_offset, data_length, data_length);
1224 	CU_ASSERT_EQUAL(g_completion_success, true);
1225 
1226 	blockdev_copy(target, dst_offset, src_offset, data_length);
1227 	CU_ASSERT_EQUAL(g_completion_success, true);
1228 
1229 	blockdev_read(target, rx_buf, dst_offset, data_length, data_length);
1230 	CU_ASSERT_EQUAL(g_completion_success, true);
1231 
1232 	rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
1233 	CU_ASSERT_EQUAL(rc, 0);
1234 }
1235 
/* Runs on g_thread_init: tear down all targets and either keep the app
 * alive for the next RPC (when started with -w) or shut it down.
 * arg is the in-flight JSON-RPC request, or NULL when invoked from the
 * shutdown callback. */
static void
__stop_init_thread(void *arg)
{
	unsigned num_failures = g_num_failures;
	struct spdk_jsonrpc_request *request = arg;

	/* Reset the counter so a subsequent RPC-driven run starts clean. */
	g_num_failures = 0;

	bdevio_cleanup_targets();
	if (g_wait_for_tests && !g_shutdown) {
		/* Do not stop the app yet, wait for another RPC */
		rpc_perform_tests_cb(num_failures, request);
		return;
	}
	assert(spdk_get_thread() == g_thread_init);
	assert(spdk_get_thread() == spdk_thread_get_app_thread());
	/* Retire the IO thread before stopping the app; the UT thread exits itself. */
	execute_spdk_function(__exit_io_thread, NULL);
	spdk_app_stop(num_failures);
}
1255 
/* Record the test outcome and hop to the init (app) thread to finish
 * shutdown; safe to call from any thread. */
static void
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	g_num_failures = num_failures;

	spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
}
1263 
/* CUnit suite-init: on the first suite, position the cursor at the head of
 * the target list; later suites inherit the cursor advanced by suite_fini. */
static int
suite_init(void)
{
	if (g_current_io_target == NULL) {
		g_current_io_target = g_io_targets;
	}
	return 0;
}
1272 
/* CUnit suite-fini: advance the cursor so the next suite (one per target,
 * registered in list order) runs against the next target. */
static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}
1279 
1280 #define SUITE_NAME_MAX 64
1281 
1282 static int
1283 __setup_ut_on_single_target(struct io_target *target)
1284 {
1285 	unsigned rc = 0;
1286 	CU_pSuite suite = NULL;
1287 	char name[SUITE_NAME_MAX];
1288 
1289 	snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
1290 	suite = CU_add_suite(name, suite_init, suite_fini);
1291 	if (suite == NULL) {
1292 		CU_cleanup_registry();
1293 		rc = CU_get_error();
1294 		return -rc;
1295 	}
1296 
1297 	if (
1298 		CU_add_test(suite, "blockdev write read block",
1299 			    blockdev_write_read_block) == NULL
1300 		|| CU_add_test(suite, "blockdev write zeroes read block",
1301 			       blockdev_write_zeroes_read_block) == NULL
1302 		|| CU_add_test(suite, "blockdev write zeroes read no split",
1303 			       blockdev_write_zeroes_read_no_split) == NULL
1304 		|| CU_add_test(suite, "blockdev write zeroes read split",
1305 			       blockdev_write_zeroes_read_split) == NULL
1306 		|| CU_add_test(suite, "blockdev write zeroes read split partial",
1307 			       blockdev_write_zeroes_read_split_partial) == NULL
1308 		|| CU_add_test(suite, "blockdev reset",
1309 			       blockdev_test_reset) == NULL
1310 		|| CU_add_test(suite, "blockdev write read 8 blocks",
1311 			       blockdev_write_read_8blocks) == NULL
1312 		|| CU_add_test(suite, "blockdev write read size > 128k",
1313 			       blockdev_write_read_size_gt_128k) == NULL
1314 		|| CU_add_test(suite, "blockdev write read invalid size",
1315 			       blockdev_write_read_invalid_size) == NULL
1316 		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
1317 			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
1318 		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
1319 			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
1320 		|| CU_add_test(suite, "blockdev write read max offset",
1321 			       blockdev_write_read_max_offset) == NULL
1322 		|| CU_add_test(suite, "blockdev write read 2 blocks on overlapped address offset",
1323 			       blockdev_overlapped_write_read_2blocks) == NULL
1324 		|| CU_add_test(suite, "blockdev writev readv 8 blocks",
1325 			       blockdev_writev_readv_8blocks) == NULL
1326 		|| CU_add_test(suite, "blockdev writev readv 30 x 1block",
1327 			       blockdev_writev_readv_30x1block) == NULL
1328 		|| CU_add_test(suite, "blockdev writev readv block",
1329 			       blockdev_writev_readv_block) == NULL
1330 		|| CU_add_test(suite, "blockdev writev readv size > 128k",
1331 			       blockdev_writev_readv_size_gt_128k) == NULL
1332 		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
1333 			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
1334 		|| CU_add_test(suite, "blockdev comparev and writev",
1335 			       blockdev_comparev_and_writev) == NULL
1336 		|| CU_add_test(suite, "blockdev nvme passthru rw",
1337 			       blockdev_test_nvme_passthru_rw) == NULL
1338 		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
1339 			       blockdev_test_nvme_passthru_vendor_specific) == NULL
1340 		|| CU_add_test(suite, "blockdev nvme admin passthru",
1341 			       blockdev_test_nvme_admin_passthru) == NULL
1342 		|| CU_add_test(suite, "blockdev copy",
1343 			       blockdev_test_copy) == NULL
1344 	) {
1345 		CU_cleanup_registry();
1346 		rc = CU_get_error();
1347 		return -rc;
1348 	}
1349 	return 0;
1350 }
1351 
1352 static void
1353 __run_ut_thread(void *arg)
1354 {
1355 	struct spdk_jsonrpc_request *request = arg;
1356 	int rc = 0;
1357 	struct io_target *target;
1358 
1359 	if (CU_initialize_registry() != CUE_SUCCESS) {
1360 		/* CUnit error, probably won't recover */
1361 		rc = CU_get_error();
1362 		rc = -rc;
1363 		goto ret;
1364 	}
1365 
1366 	target = g_io_targets;
1367 	while (target != NULL) {
1368 		rc = __setup_ut_on_single_target(target);
1369 		if (rc < 0) {
1370 			/* CUnit error, probably won't recover */
1371 			rc = -rc;
1372 			goto ret;
1373 		}
1374 		target = target->next;
1375 	}
1376 	CU_basic_set_mode(CU_BRM_VERBOSE);
1377 	CU_basic_run_tests();
1378 	rc = CU_get_number_of_failures();
1379 	CU_cleanup_registry();
1380 
1381 ret:
1382 	stop_init_thread(rc, request);
1383 	assert(spdk_get_thread() == g_thread_ut);
1384 	spdk_thread_exit(g_thread_ut);
1385 }
1386 
1387 static void
1388 __construct_targets(void *arg)
1389 {
1390 	if (bdevio_construct_targets() < 0) {
1391 		spdk_app_stop(-1);
1392 		return;
1393 	}
1394 
1395 	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
1396 }
1397 
1398 static void
1399 test_main(void *arg1)
1400 {
1401 	struct spdk_cpuset tmpmask = {};
1402 	uint32_t i;
1403 
1404 	pthread_mutex_init(&g_test_mutex, NULL);
1405 	pthread_cond_init(&g_test_cond, NULL);
1406 
1407 	/* This test runs specifically on at least three cores.
1408 	 * g_thread_init is the app_thread on main core from event framework.
1409 	 * Next two are only for the tests and should always be on separate CPU cores. */
1410 	if (spdk_env_get_core_count() < 3) {
1411 		spdk_app_stop(-1);
1412 		return;
1413 	}
1414 
1415 	SPDK_ENV_FOREACH_CORE(i) {
1416 		if (i == spdk_env_get_current_core()) {
1417 			g_thread_init = spdk_get_thread();
1418 			continue;
1419 		}
1420 		spdk_cpuset_zero(&tmpmask);
1421 		spdk_cpuset_set_cpu(&tmpmask, i, true);
1422 		if (g_thread_ut == NULL) {
1423 			g_thread_ut = spdk_thread_create("ut_thread", &tmpmask);
1424 		} else if (g_thread_io == NULL) {
1425 			g_thread_io = spdk_thread_create("io_thread", &tmpmask);
1426 		}
1427 
1428 	}
1429 
1430 	if (g_wait_for_tests) {
1431 		/* Do not perform any tests until RPC is received */
1432 		return;
1433 	}
1434 
1435 	spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
1436 }
1437 
/* Print the app-specific command-line options appended to the common usage. */
static void
bdevio_usage(void)
{
	fputs(" -w                        start bdevio app and wait for RPC to start the tests\n",
	      stdout);
}
1443 
1444 static int
1445 bdevio_parse_arg(int ch, char *arg)
1446 {
1447 	switch (ch) {
1448 	case 'w':
1449 		g_wait_for_tests =  true;
1450 		break;
1451 	default:
1452 		return -EINVAL;
1453 	}
1454 	return 0;
1455 }
1456 
/* Decoded parameters of the "perform_tests" RPC. */
struct rpc_perform_tests {
	char *name;	/* optional bdev name; NULL means test every bdev */
};
1460 
/* Release memory owned by a decoded rpc_perform_tests (name is strdup'd by
 * spdk_json_decode_string; free(NULL) is a safe no-op). */
static void
free_rpc_perform_tests(struct rpc_perform_tests *r)
{
	free(r->name);
}
1466 
/* JSON decoder table for "perform_tests"; trailing true marks "name" optional. */
static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};
1470 
1471 static void
1472 rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
1473 {
1474 	struct spdk_json_write_ctx *w;
1475 
1476 	if (num_failures == 0) {
1477 		w = spdk_jsonrpc_begin_result(request);
1478 		spdk_json_write_uint32(w, num_failures);
1479 		spdk_jsonrpc_end_result(request, w);
1480 	} else {
1481 		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
1482 						     "%d test cases failed", num_failures);
1483 	}
1484 }
1485 
/* "perform_tests" RPC handler: construct targets (one named bdev, or all
 * bdevs when "name" is omitted) and launch the test run on the UT thread.
 * The request is answered later by rpc_perform_tests_cb() via
 * __stop_init_thread(); every error path responds immediately here. */
static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	struct spdk_bdev *bdev;
	int rc;

	/* params may legitimately be NULL (no arguments given). */
	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		/* Test only the named bdev. */
		bdev = spdk_bdev_get_by_name(req.name);
		if (bdev == NULL) {
			SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Bdev '%s' does not exist: %s",
							     req.name, spdk_strerror(ENODEV));
			goto invalid;
		}
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     spdk_bdev_get_name(bdev), spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		/* No name supplied: test every registered bdev. */
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	/* Hand the request to the UT thread; the response is sent on completion. */
	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);

	return;

invalid:
	free_rpc_perform_tests(&req);
}
1537 SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)
1538 
/* App shutdown callback (e.g. SIGINT): flag shutdown so __stop_init_thread
 * stops the app even in -w (wait-for-RPC) mode, then trigger teardown on the
 * init thread. */
static void
spdk_bdevio_shutdown_cb(void)
{
	g_shutdown = true;
	spdk_thread_send_msg(g_thread_init, __stop_init_thread, NULL);
}
1545 
1546 int
1547 main(int argc, char **argv)
1548 {
1549 	int			rc;
1550 	struct spdk_app_opts	opts = {};
1551 
1552 	spdk_app_opts_init(&opts, sizeof(opts));
1553 	opts.name = "bdevio";
1554 	opts.reactor_mask = "0x7";
1555 	opts.shutdown_cb = spdk_bdevio_shutdown_cb;
1556 
1557 	if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
1558 				      bdevio_parse_arg, bdevio_usage)) !=
1559 	    SPDK_APP_PARSE_ARGS_SUCCESS) {
1560 		return rc;
1561 	}
1562 
1563 	rc = spdk_app_start(&opts, test_main, NULL);
1564 	spdk_app_fini();
1565 
1566 	return rc;
1567 }
1568