xref: /spdk/test/bdev/bdevio/bdevio.c (revision f8abbede89d30584d2a4f8427b13896f8591b873)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2016 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/bdev.h"
10 #include "spdk/accel.h"
11 #include "spdk/env.h"
12 #include "spdk/log.h"
13 #include "spdk/thread.h"
14 #include "spdk/event.h"
15 #include "spdk/rpc.h"
16 #include "spdk/util.h"
17 #include "spdk/string.h"
18 
19 #include "bdev_internal.h"
20 #include "CUnit/Basic.h"
21 
#define BUFFER_IOVS		1024
/* Parenthesized so the macro composes safely inside larger expressions. */
#define BUFFER_SIZE		(260 * 1024)
#define BDEV_TASK_ARRAY_SIZE	2048
25 
/* Mutex/condvar pair used to hand work to the I/O thread and to block the
 * UT thread until that work signals completion (see execute_spdk_function). */
pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

static struct spdk_thread *g_thread_init;	/* app init/teardown thread */
static struct spdk_thread *g_thread_ut;		/* thread running the CUnit tests */
static struct spdk_thread *g_thread_io;		/* thread that submits all bdev I/O */
/* NOTE(review): presumably set when tests are driven via RPC rather than at
 * startup -- confirm against the option parsing (not visible in this chunk). */
static bool g_wait_for_tests = false;
static int g_num_failures = 0;			/* accumulated CUnit failure count */
static bool g_shutdown = false;
35 
/* One bdev under test: its open descriptor plus the I/O channel obtained on
 * the I/O thread. Targets form a singly linked list headed by g_io_targets. */
struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;
	struct io_target	*next;
};
42 
/*
 * Parameters for one I/O handed to the I/O thread via execute_spdk_function().
 * buf may be chopped into iov[] by sgl_chop_buffer(); the fused_* fields carry
 * the write half of a compare-and-write; src_offset is used only by the copy
 * test.
 */
struct bdevio_request {
	char *buf;		/* primary data buffer */
	char *fused_buf;	/* write buffer for compare-and-write */
	int data_len;		/* transfer length in bytes */
	uint64_t offset;	/* byte offset of the I/O (destination for copy) */
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;
	struct iovec fused_iov[BUFFER_IOVS];
	int fused_iovcnt;
	struct io_target *target;
	uint64_t src_offset;	/* byte offset of the copy source */
};
55 
struct io_target *g_io_targets = NULL;		/* list built by bdevio_construct_targets() */
struct io_target *g_current_io_target = NULL;	/* target the running test case operates on */
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);
59 
/*
 * Send fn(arg) to the I/O thread and block the calling (UT) thread until the
 * handler calls wake_ut_thread(). The message is sent while holding the
 * mutex, and wake_ut_thread() takes the same mutex before signaling, so the
 * signal cannot fire before this thread is parked in pthread_cond_wait().
 * NOTE(review): there is no predicate re-checked around the wait, so a
 * spurious wakeup would release the caller early -- confirm this is
 * acceptable for the test environment.
 */
static void
execute_spdk_function(spdk_msg_fn fn, void *arg)
{
	pthread_mutex_lock(&g_test_mutex);
	spdk_thread_send_msg(g_thread_io, fn, arg);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}
68 
/* Called on the I/O thread when a message handler finishes; releases the UT
 * thread blocked in execute_spdk_function(). Taking the mutex first
 * guarantees the waiter is already parked in pthread_cond_wait(). */
static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}
76 
/* Message handler that must run on g_thread_io: mark the I/O thread exited,
 * then release the waiting UT thread. */
static void
__exit_io_thread(void *arg)
{
	assert(spdk_get_thread() == g_thread_io);
	spdk_thread_exit(g_thread_io);
	wake_ut_thread();
}
84 
/* Message handler run on the I/O thread: obtain the per-thread I/O channel
 * for the target's descriptor, then release the waiting UT thread. */
static void
__get_io_channel(void *arg)
{
	struct io_target *target = arg;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}
94 
/* Event callback required by spdk_bdev_open_ext(); bdevio ignores bdev
 * events, so the body is intentionally empty. */
static void
bdevio_construct_target_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				void *event_ctx)
{
}
100 
101 static int
102 bdevio_construct_target_by_name(const char *bdev_name)
103 {
104 	struct io_target *target;
105 	struct spdk_bdev *bdev;
106 	uint64_t num_blocks;
107 	uint32_t block_size;
108 	int rc;
109 
110 	target = malloc(sizeof(struct io_target));
111 	if (target == NULL) {
112 		return -ENOMEM;
113 	}
114 
115 	rc = spdk_bdev_open_ext(bdev_name, true, bdevio_construct_target_open_cb, NULL,
116 				&target->bdev_desc);
117 	if (rc != 0) {
118 		free(target);
119 		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", bdev_name, rc);
120 		return rc;
121 	}
122 
123 	bdev = spdk_bdev_desc_get_bdev(target->bdev_desc);
124 
125 	num_blocks = spdk_bdev_get_num_blocks(bdev);
126 	block_size = spdk_bdev_get_block_size(bdev);
127 
128 	printf("  %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
129 	       bdev_name, num_blocks, block_size,
130 	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));
131 
132 	target->bdev = bdev;
133 	target->next = g_io_targets;
134 	execute_spdk_function(__get_io_channel, target);
135 	g_io_targets = target;
136 
137 	return 0;
138 }
139 
/* spdk_for_each_bdev_leaf() callback: register the leaf bdev as a target. */
static int
bdevio_construct_target(void *ctx, struct spdk_bdev *bdev)
{
	return bdevio_construct_target_by_name(spdk_bdev_get_name(bdev));
}
147 
148 static int
149 bdevio_construct_targets(void)
150 {
151 	int rc;
152 
153 	printf("I/O targets:\n");
154 
155 	rc = spdk_for_each_bdev_leaf(NULL, bdevio_construct_target);
156 	if (rc < 0) {
157 		SPDK_ERRLOG("Could not complete constructing bdevs, error=%d\n", rc);
158 		return rc;
159 	}
160 
161 	if (g_io_targets == NULL) {
162 		SPDK_ERRLOG("No bdevs to perform tests on\n");
163 		return -1;
164 	}
165 
166 	return 0;
167 }
168 
/* Message handler run on the I/O thread: release the target's I/O channel
 * (must happen on the thread that obtained it), then release the UT thread. */
static void
__put_io_channel(void *arg)
{
	struct io_target *target = arg;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}
177 
178 static void
179 bdevio_cleanup_targets(void)
180 {
181 	struct io_target *target;
182 
183 	target = g_io_targets;
184 	while (target != NULL) {
185 		execute_spdk_function(__put_io_channel, target);
186 		spdk_bdev_close(target->bdev_desc);
187 		g_io_targets = target->next;
188 		free(target);
189 		target = g_io_targets;
190 	}
191 }
192 
/* Outcome of the most recent I/O, recorded by quick_test_complete(). */
static bool g_completion_success;
194 
195 static void
196 initialize_buffer(char **buf, int pattern, int size, uint32_t block_size)
197 {
198 	CU_ASSERT(block_size != 0);
199 
200 	*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
201 	memset(*buf, pattern, size);
202 
203 	if (pattern) {
204 		for (int offset = 0, block = 0; offset < size; offset += block_size, block++) {
205 			*(*buf + offset) = block;
206 		}
207 	}
208 }
209 
/* Generic I/O completion callback: record the result in g_completion_success,
 * free the bdev_io, and release the UT thread waiting on the operation. */
static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}
217 
218 static uint64_t
219 bdev_bytes_to_blocks(struct spdk_bdev *bdev, uint64_t bytes)
220 {
221 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
222 
223 	CU_ASSERT(bytes % block_size == 0);
224 	return bytes / block_size;
225 }
226 
/*
 * Message handler run on the I/O thread: submit a write for req (vectored
 * when req->iovcnt is non-zero). On submission failure, record the failure
 * and release the UT thread directly since quick_test_complete() will never
 * be invoked.
 */
static void
__blockdev_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
247 
/*
 * Message handler run on the I/O thread: submit a write_zeroes for req's
 * extent. On submission failure, record the failure and release the UT
 * thread directly since quick_test_complete() will never be invoked.
 */
static void
__blockdev_write_zeroes(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
262 
/*
 * Message handler run on the I/O thread: submit a fused compare-and-write.
 * iov holds the compare data, fused_iov the write data; byte offset/length
 * are converted to block units for the *_blocks API. On submission failure,
 * record the failure and release the UT thread directly.
 */
static void
__blockdev_compare_and_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	struct spdk_bdev *bdev = target->bdev;
	int rc;

	rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt,
			req->fused_iov, req->fused_iovcnt, bdev_bytes_to_blocks(bdev, req->offset),
			bdev_bytes_to_blocks(bdev, req->data_len), quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
280 
281 static void
282 sgl_chop_buffer(struct bdevio_request *req, int iov_len)
283 {
284 	int data_len = req->data_len;
285 	char *buf = req->buf;
286 
287 	req->iovcnt = 0;
288 	if (!iov_len) {
289 		return;
290 	}
291 
292 	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
293 		if (data_len < iov_len) {
294 			iov_len = data_len;
295 		}
296 
297 		req->iov[req->iovcnt].iov_base = buf;
298 		req->iov[req->iovcnt].iov_len = iov_len;
299 
300 		buf += iov_len;
301 		data_len -= iov_len;
302 	}
303 
304 	CU_ASSERT_EQUAL_FATAL(data_len, 0);
305 }
306 
307 static void
308 sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len)
309 {
310 	int data_len = req->data_len;
311 	char *buf = req->fused_buf;
312 
313 	req->fused_iovcnt = 0;
314 	if (!iov_len) {
315 		return;
316 	}
317 
318 	for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) {
319 		if (data_len < iov_len) {
320 			iov_len = data_len;
321 		}
322 
323 		req->fused_iov[req->fused_iovcnt].iov_base = buf;
324 		req->fused_iov[req->fused_iovcnt].iov_len = iov_len;
325 
326 		buf += iov_len;
327 		data_len -= iov_len;
328 	}
329 
330 	CU_ASSERT_EQUAL_FATAL(data_len, 0);
331 }
332 
333 static void
334 blockdev_write(struct io_target *target, char *tx_buf,
335 	       uint64_t offset, int data_len, int iov_len)
336 {
337 	struct bdevio_request req;
338 
339 	req.target = target;
340 	req.buf = tx_buf;
341 	req.data_len = data_len;
342 	req.offset = offset;
343 	sgl_chop_buffer(&req, iov_len);
344 
345 	g_completion_success = false;
346 
347 	execute_spdk_function(__blockdev_write, &req);
348 }
349 
350 static void
351 _blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf,
352 			    uint64_t offset, int data_len, int iov_len)
353 {
354 	struct bdevio_request req;
355 
356 	req.target = target;
357 	req.buf = cmp_buf;
358 	req.fused_buf = write_buf;
359 	req.data_len = data_len;
360 	req.offset = offset;
361 	sgl_chop_buffer(&req, iov_len);
362 	sgl_chop_fused_buffer(&req, iov_len);
363 
364 	g_completion_success = false;
365 
366 	execute_spdk_function(__blockdev_compare_and_write, &req);
367 }
368 
369 static void
370 blockdev_write_zeroes(struct io_target *target, char *tx_buf,
371 		      uint64_t offset, int data_len)
372 {
373 	struct bdevio_request req;
374 
375 	req.target = target;
376 	req.buf = tx_buf;
377 	req.data_len = data_len;
378 	req.offset = offset;
379 
380 	g_completion_success = false;
381 
382 	execute_spdk_function(__blockdev_write_zeroes, &req);
383 }
384 
/*
 * Message handler run on the I/O thread: submit a read for req (vectored
 * when req->iovcnt is non-zero). On submission failure, record the failure
 * and release the UT thread directly since quick_test_complete() will never
 * be invoked.
 */
static void
__blockdev_read(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
405 
406 static void
407 blockdev_read(struct io_target *target, char *rx_buf,
408 	      uint64_t offset, int data_len, int iov_len)
409 {
410 	struct bdevio_request req;
411 
412 	req.target = target;
413 	req.buf = rx_buf;
414 	req.data_len = data_len;
415 	req.offset = offset;
416 	req.iovcnt = 0;
417 	sgl_chop_buffer(&req, iov_len);
418 
419 	g_completion_success = false;
420 
421 	execute_spdk_function(__blockdev_read, &req);
422 }
423 
/*
 * Message handler run on the I/O thread: submit a copy from req->src_offset
 * to req->offset (byte values converted to block units). On submission
 * failure, record the failure and release the UT thread directly since
 * quick_test_complete() will never be invoked.
 */
static void
_blockdev_copy(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	struct spdk_bdev *bdev = target->bdev;
	int rc;

	rc = spdk_bdev_copy_blocks(target->bdev_desc, target->ch,
				   bdev_bytes_to_blocks(bdev, req->offset),
				   bdev_bytes_to_blocks(bdev, req->src_offset),
				   bdev_bytes_to_blocks(bdev, req->data_len),
				   quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
443 
444 static void
445 blockdev_copy(struct io_target *target, uint64_t dst_offset, uint64_t src_offset, int data_len)
446 {
447 	struct bdevio_request req;
448 
449 	req.target = target;
450 	req.data_len = data_len;
451 	req.offset = dst_offset;
452 	req.src_offset = src_offset;
453 
454 	g_completion_success = false;
455 
456 	execute_spdk_function(_blockdev_copy, &req);
457 }
458 
/* Return 0 iff the first data_length bytes of rx_buf and tx_buf match. */
static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int diff = memcmp(rx_buf, tx_buf, data_length);

	return diff;
}
464 
/*
 * Core verification helper: write (or write_zeroes) @data_length bytes of
 * @pattern at @offset on the current target, read the range back, and check
 * both completions against @expected_rc (0 => both succeed, else both fail).
 * When both I/Os succeed, the read-back data must match what was written.
 * @iov_len selects vectored I/O (0 => flat buffer).
 */
static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes, uint32_t block_size)
{
	struct io_target *target;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;
	uint64_t write_offset = offset;
	uint32_t write_data_len = data_length;

	target = g_current_io_target;

	/* For bdevs with a write unit larger than one block, widen the write
	 * to a write-unit-aligned extent covering [offset, offset+len). Only
	 * done for expected-success cases; failure cases keep raw params. */
	if (spdk_bdev_get_write_unit_size(target->bdev) > 1 && expected_rc == 0) {
		uint32_t write_unit_bytes;

		write_unit_bytes = spdk_bdev_get_write_unit_size(target->bdev) *
				   spdk_bdev_get_block_size(target->bdev);
		write_offset -= offset % write_unit_bytes;
		write_data_len += (offset - write_offset);

		if (write_data_len % write_unit_bytes) {
			write_data_len += write_unit_bytes - write_data_len % write_unit_bytes;
		}
	}

	if (!write_zeroes) {
		initialize_buffer(&tx_buf, pattern, write_data_len, block_size);
		initialize_buffer(&rx_buf, 0, data_length, block_size);

		blockdev_write(target, tx_buf, write_offset, write_data_len, iov_len);
	} else {
		/* tx is all zeroes; rx is pre-filled with the pattern so a
		 * write_zeroes that silently did nothing is detected below. */
		initialize_buffer(&tx_buf, 0, write_data_len, block_size);
		initialize_buffer(&rx_buf, pattern, data_length, block_size);

		blockdev_write_zeroes(target, tx_buf, write_offset, write_data_len);
	}


	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}
	blockdev_read(target, rx_buf, offset, data_length, iov_len);

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	if (g_completion_success) {
		/* Compare only the caller's range; tx_buf is advanced past any
		 * write-unit padding prepended above. */
		rc = blockdev_write_read_data_match(rx_buf, tx_buf + (offset - write_offset), data_length);
		/* Assert the write by comparing it with values read
		 * from each blockdev */
		CU_ASSERT_EQUAL(rc, 0);
	}

	spdk_free(rx_buf);
	spdk_free(tx_buf);
}
527 
/*
 * Verify compare-and-write semantics on the current target: after writing
 * 0xAA, the first compare(0xAA)/write(0xBB) must succeed, an identical second
 * attempt must fail (the medium now holds 0xBB), and a read-back must return
 * the 0xBB data.
 */
static void
blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset)
{
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	char	*tx_buf = NULL;
	char	*write_buf = NULL;
	char	*rx_buf = NULL;
	int	rc;
	const uint32_t block_size = spdk_bdev_get_block_size(bdev);

	initialize_buffer(&tx_buf, 0xAA, data_length, block_size);
	initialize_buffer(&rx_buf, 0, data_length, block_size);
	initialize_buffer(&write_buf, 0xBB, data_length, block_size);

	blockdev_write(target, tx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	/* Compare data matches what was just written: must succeed. */
	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	/* Same compare data no longer matches (medium holds 0xBB): must fail. */
	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);
	rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);

	spdk_free(rx_buf);
	spdk_free(tx_buf);
	spdk_free(write_buf);
}
563 
564 static void
565 blockdev_write_read_block(void)
566 {
567 	uint32_t data_length;
568 	uint64_t offset;
569 	int pattern;
570 	int expected_rc;
571 	struct io_target *target = g_current_io_target;
572 	struct spdk_bdev *bdev = target->bdev;
573 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
574 
575 	/* Data size = 1 block */
576 	data_length = block_size;
577 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
578 	offset = 0;
579 	pattern = 0xA3;
580 	/* Params are valid, hence the expected return value
581 	 * of write and read for all blockdevs is 0. */
582 	expected_rc = 0;
583 
584 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
585 }
586 
587 static void
588 blockdev_write_zeroes_read_block(void)
589 {
590 	uint32_t data_length;
591 	uint64_t offset;
592 	int pattern;
593 	int expected_rc;
594 	struct io_target *target = g_current_io_target;
595 	struct spdk_bdev *bdev = target->bdev;
596 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
597 
598 	/* Data size = 1 block */
599 	data_length = block_size;
600 	offset = 0;
601 	pattern = 0xA3;
602 	/* Params are valid, hence the expected return value
603 	 * of write_zeroes and read for all blockdevs is 0. */
604 	expected_rc = 0;
605 
606 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1, block_size);
607 }
608 
609 /*
610  * This i/o will not have to split at the bdev layer.
611  */
612 static void
613 blockdev_write_zeroes_read_no_split(void)
614 {
615 	uint32_t data_length;
616 	uint64_t offset;
617 	int pattern;
618 	int expected_rc;
619 	struct io_target *target = g_current_io_target;
620 	struct spdk_bdev *bdev = target->bdev;
621 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
622 
623 	/* Data size = block size aligned ZERO_BUFFER_SIZE */
624 	data_length = ZERO_BUFFER_SIZE; /* from bdev_internal.h */
625 	data_length -= ZERO_BUFFER_SIZE % block_size;
626 	offset = 0;
627 	pattern = 0xA3;
628 	/* Params are valid, hence the expected return value
629 	 * of write_zeroes and read for all blockdevs is 0. */
630 	expected_rc = 0;
631 
632 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1, block_size);
633 }
634 
635 /*
636  * This i/o will have to split at the bdev layer if
637  * write-zeroes is not supported by the bdev.
638  */
639 static void
640 blockdev_write_zeroes_read_split(void)
641 {
642 	uint32_t data_length;
643 	uint64_t offset;
644 	int pattern;
645 	int expected_rc;
646 	struct io_target *target = g_current_io_target;
647 	struct spdk_bdev *bdev = target->bdev;
648 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
649 
650 	/* Data size = block size aligned 3 * ZERO_BUFFER_SIZE */
651 	data_length = 3 * ZERO_BUFFER_SIZE; /* from bdev_internal.h */
652 	data_length -= data_length % block_size;
653 	offset = 0;
654 	pattern = 0xA3;
655 	/* Params are valid, hence the expected return value
656 	 * of write_zeroes and read for all blockdevs is 0. */
657 	expected_rc = 0;
658 
659 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1, block_size);
660 }
661 
662 /*
663  * This i/o will have to split at the bdev layer if
664  * write-zeroes is not supported by the bdev. It also
665  * tests a write size that is not an even multiple of
666  * the bdev layer zero buffer size.
667  */
668 static void
669 blockdev_write_zeroes_read_split_partial(void)
670 {
671 	uint32_t data_length;
672 	uint64_t offset;
673 	int pattern;
674 	int expected_rc;
675 	struct io_target *target = g_current_io_target;
676 	struct spdk_bdev *bdev = target->bdev;
677 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
678 
679 	/* Data size = block size aligned 7 * ZERO_BUFFER_SIZE / 2 */
680 	data_length = ZERO_BUFFER_SIZE * 7 / 2;
681 	data_length -= data_length % block_size;
682 	offset = 0;
683 	pattern = 0xA3;
684 	/* Params are valid, hence the expected return value
685 	 * of write_zeroes and read for all blockdevs is 0. */
686 	expected_rc = 0;
687 
688 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1, block_size);
689 }
690 
691 static void
692 blockdev_writev_readv_block(void)
693 {
694 	uint32_t data_length, iov_len;
695 	uint64_t offset;
696 	int pattern;
697 	int expected_rc;
698 	struct io_target *target = g_current_io_target;
699 	struct spdk_bdev *bdev = target->bdev;
700 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
701 
702 	/* Data size = 1 block */
703 	data_length = block_size;
704 	iov_len = data_length;
705 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
706 	offset = 0;
707 	pattern = 0xA3;
708 	/* Params are valid, hence the expected return value
709 	 * of write and read for all blockdevs is 0. */
710 	expected_rc = 0;
711 
712 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0, block_size);
713 }
714 
/* Compare-and-write with a transfer size equal to the bdev's atomic
 * compare-and-write unit (ACWU); skipped for separate-metadata bdevs. */
static void
blockdev_comparev_and_writev(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	if (spdk_bdev_is_md_separate(bdev)) {
		/* TODO: remove this check once bdev layer properly supports
		 * compare and write for bdevs with separate md.
		 */
		SPDK_ERRLOG("skipping comparev_and_writev on bdev %s since it has\n"
			    "separate metadata which is not supported yet.\n",
			    spdk_bdev_get_name(bdev));
		return;
	}

	/* Data size = acwu size */
	data_length = spdk_bdev_get_block_size(bdev) * spdk_bdev_get_acwu(bdev);
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;

	blockdev_compare_and_write(data_length, iov_len, offset);
}
741 
742 static void
743 blockdev_writev_readv_30x1block(void)
744 {
745 	uint32_t data_length, iov_len;
746 	uint64_t offset;
747 	int pattern;
748 	int expected_rc;
749 	struct io_target *target = g_current_io_target;
750 	struct spdk_bdev *bdev = target->bdev;
751 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
752 
753 	/* Data size = 30 * block size */
754 	data_length = block_size * 30;
755 	iov_len = block_size;
756 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
757 	offset = 0;
758 	pattern = 0xA3;
759 	/* Params are valid, hence the expected return value
760 	 * of write and read for all blockdevs is 0. */
761 	expected_rc = 0;
762 
763 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0, block_size);
764 }
765 
766 static void
767 blockdev_write_read_8blocks(void)
768 {
769 	uint32_t data_length;
770 	uint64_t offset;
771 	int pattern;
772 	int expected_rc;
773 	struct io_target *target = g_current_io_target;
774 	struct spdk_bdev *bdev = target->bdev;
775 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
776 
777 	/* Data size = 8 * block size */
778 	data_length = block_size * 8;
779 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
780 	offset = data_length;
781 	pattern = 0xA3;
782 	/* Params are valid, hence the expected return value
783 	 * of write and read for all blockdevs is 0. */
784 	expected_rc = 0;
785 
786 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
787 }
788 
789 static void
790 blockdev_writev_readv_8blocks(void)
791 {
792 	uint32_t data_length, iov_len;
793 	uint64_t offset;
794 	int pattern;
795 	int expected_rc;
796 	struct io_target *target = g_current_io_target;
797 	struct spdk_bdev *bdev = target->bdev;
798 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
799 
800 
801 	/* Data size = 8 * block size */
802 	data_length = block_size * 8;
803 	iov_len = data_length;
804 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
805 	offset = data_length;
806 	pattern = 0xA3;
807 	/* Params are valid, hence the expected return value
808 	 * of write and read for all blockdevs is 0. */
809 	expected_rc = 0;
810 
811 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0, block_size);
812 }
813 
814 static void
815 blockdev_write_read_size_gt_128k(void)
816 {
817 	uint32_t data_length;
818 	uint64_t offset;
819 	int pattern;
820 	int expected_rc;
821 	struct io_target *target = g_current_io_target;
822 	struct spdk_bdev *bdev = target->bdev;
823 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
824 
825 	/* Data size = block size aligned 128K + 1 block */
826 	data_length = 128 * 1024;
827 	data_length -= data_length % block_size;
828 	data_length += block_size;
829 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
830 	offset = block_size * 2;
831 	pattern = 0xA3;
832 	/* Params are valid, hence the expected return value
833 	 * of write and read for all blockdevs is 0. */
834 	expected_rc = 0;
835 
836 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
837 }
838 
839 static void
840 blockdev_writev_readv_size_gt_128k(void)
841 {
842 	uint32_t data_length, iov_len;
843 	uint64_t offset;
844 	int pattern;
845 	int expected_rc;
846 	struct io_target *target = g_current_io_target;
847 	struct spdk_bdev *bdev = target->bdev;
848 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
849 
850 	/* Data size = block size aligned 128K + 1 block */
851 	data_length = 128 * 1024;
852 	data_length -= data_length % block_size;
853 	data_length += block_size;
854 	iov_len = data_length;
855 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
856 	offset = block_size * 2;
857 	pattern = 0xA3;
858 	/* Params are valid, hence the expected return value
859 	 * of write and read for all blockdevs is 0. */
860 	expected_rc = 0;
861 
862 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0, block_size);
863 }
864 
865 static void
866 blockdev_writev_readv_size_gt_128k_two_iov(void)
867 {
868 	uint32_t data_length, iov_len;
869 	uint64_t offset;
870 	int pattern;
871 	int expected_rc;
872 	struct io_target *target = g_current_io_target;
873 	struct spdk_bdev *bdev = target->bdev;
874 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
875 
876 	/* Data size = block size aligned 128K + 1 block */
877 	data_length = 128 * 1024;
878 	data_length -= data_length % block_size;
879 	iov_len = data_length;
880 	data_length += block_size;
881 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
882 	offset = block_size * 2;
883 	pattern = 0xA3;
884 	/* Params are valid, hence the expected return value
885 	 * of write and read for all blockdevs is 0. */
886 	expected_rc = 0;
887 
888 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0, block_size);
889 }
890 
891 static void
892 blockdev_write_read_invalid_size(void)
893 {
894 	uint32_t data_length;
895 	uint64_t offset;
896 	int pattern;
897 	int expected_rc;
898 	struct io_target *target = g_current_io_target;
899 	struct spdk_bdev *bdev = target->bdev;
900 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
901 
902 	/* Data size is not a multiple of the block size */
903 	data_length = block_size - 1;
904 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
905 	offset = block_size * 2;
906 	pattern = 0xA3;
907 	/* Params are invalid, hence the expected return value
908 	 * of write and read for all blockdevs is < 0 */
909 	expected_rc = -1;
910 
911 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
912 }
913 
914 static void
915 blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
916 {
917 	uint32_t data_length;
918 	uint64_t offset;
919 	int pattern;
920 	int expected_rc;
921 	struct io_target *target = g_current_io_target;
922 	struct spdk_bdev *bdev = target->bdev;
923 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
924 
925 	data_length = block_size;
926 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
927 	/* The start offset has been set to a marginal value
928 	 * such that offset + nbytes == Total size of
929 	 * blockdev. */
930 	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);
931 	pattern = 0xA3;
932 	/* Params are valid, hence the expected return value
933 	 * of write and read for all blockdevs is 0. */
934 	expected_rc = 0;
935 
936 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
937 }
938 
939 static void
940 blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
941 {
942 	uint32_t data_length;
943 	uint64_t offset;
944 	int pattern;
945 	int expected_rc;
946 	struct io_target *target = g_current_io_target;
947 	struct spdk_bdev *bdev = target->bdev;
948 	uint32_t block_size = spdk_bdev_get_block_size(bdev);
949 
950 	/* Tests the overflow condition of the blockdevs. */
951 	data_length = block_size * 2;
952 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
953 	pattern = 0xA3;
954 
955 	/* The start offset has been set to a valid value
956 	 * but offset + nbytes is greater than the Total size
957 	 * of the blockdev. The test should fail. */
958 	offset = (spdk_bdev_get_num_blocks(bdev) - 1) * block_size;
959 	/* Params are invalid, hence the expected return value
960 	 * of write and read for all blockdevs is < 0 */
961 	expected_rc = -1;
962 
963 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
964 }
965 
966 static void
967 blockdev_write_read_max_offset(void)
968 {
969 	int	data_length;
970 	uint64_t offset;
971 	int pattern;
972 	int expected_rc;
973 	struct io_target *target = g_current_io_target;
974 	struct spdk_bdev *bdev = target->bdev;
975 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
976 
977 	data_length = block_size;
978 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
979 	/* The start offset has been set to UINT64_MAX such that
980 	 * adding nbytes wraps around and points to an invalid address. */
981 	offset = UINT64_MAX;
982 	pattern = 0xA3;
983 	/* Params are invalid, hence the expected return value
984 	 * of write and read for all blockdevs is < 0 */
985 	expected_rc = -1;
986 
987 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
988 }
989 
990 static void
991 blockdev_overlapped_write_read_2blocks(void)
992 {
993 	int	data_length;
994 	uint64_t offset;
995 	int pattern;
996 	int expected_rc;
997 	struct io_target *target = g_current_io_target;
998 	struct spdk_bdev *bdev = target->bdev;
999 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
1000 
1001 	/* Data size = 2 blocks */
1002 	data_length = block_size * 2;
1003 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
1004 	offset = 0;
1005 	pattern = 0xA3;
1006 	/* Params are valid, hence the expected return value
1007 	 * of write and read for all blockdevs is 0. */
1008 	expected_rc = 0;
1009 	/* Assert the write by comparing it with values read
1010 	 * from the same offset for each blockdev */
1011 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
1012 
1013 	/* Overwrite the pattern 0xbb of size 2*block size on an address offset
1014 	 * overlapping with the address written above and assert the new value in
1015 	 * the overlapped address range */
1016 	/* Populate 2*block size with value 0xBB */
1017 	pattern = 0xBB;
1018 	/* Offset = 1 block; Overlap offset addresses and write value 0xbb */
1019 	offset = spdk_bdev_get_block_size(bdev);
1020 	/* Assert the write by comparing it with values read
1021 	 * from the overlapped offset for each blockdev */
1022 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
1023 }
1024 
1025 static void
1026 __blockdev_reset(void *arg)
1027 {
1028 	struct bdevio_request *req = arg;
1029 	struct io_target *target = req->target;
1030 	int rc;
1031 
1032 	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
1033 	if (rc < 0) {
1034 		g_completion_success = false;
1035 		wake_ut_thread();
1036 	}
1037 }
1038 
1039 static void
1040 blockdev_test_reset(void)
1041 {
1042 	struct bdevio_request req;
1043 	struct io_target *target;
1044 	bool reset_supported;
1045 
1046 	target = g_current_io_target;
1047 	req.target = target;
1048 
1049 	reset_supported = spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_RESET);
1050 	g_completion_success = false;
1051 
1052 	execute_spdk_function(__blockdev_reset, &req);
1053 
1054 	CU_ASSERT_EQUAL(g_completion_success, reset_supported);
1055 }
1056 
/* Parameters and result of one NVMe passthrough command; sct/sc/cdw0 are
 * filled from the completed I/O's NVMe status by nvme_pt_test_complete(). */
struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;
	void *buf;
	uint32_t len;
	struct io_target *target;
	int sct;	/* NVMe status code type of the completion */
	int sc;		/* NVMe status code of the completion */
	uint32_t cdw0;	/* completion queue entry dword 0 */
};
1066 
/* Passthrough completion callback: capture the NVMe status (cdw0/sct/sc)
 * into the request, free the bdev_io, and release the UT thread. */
static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}
1076 
1077 static void
1078 __blockdev_nvme_passthru(void *arg)
1079 {
1080 	struct bdevio_passthrough_request *pt_req = arg;
1081 	struct io_target *target = pt_req->target;
1082 	int rc;
1083 
1084 	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
1085 					&pt_req->cmd, pt_req->buf, pt_req->len,
1086 					nvme_pt_test_complete, pt_req);
1087 	if (rc) {
1088 		wake_ut_thread();
1089 	}
1090 }
1091 
1092 static void
1093 blockdev_test_nvme_passthru_rw(void)
1094 {
1095 	struct bdevio_passthrough_request pt_req;
1096 	void *write_buf, *read_buf;
1097 	struct io_target *target;
1098 
1099 	target = g_current_io_target;
1100 
1101 	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
1102 		return;
1103 	}
1104 
1105 	memset(&pt_req, 0, sizeof(pt_req));
1106 	pt_req.target = target;
1107 	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
1108 	pt_req.cmd.nsid = 1;
1109 	*(uint64_t *)&pt_req.cmd.cdw10 = 4;
1110 	pt_req.cmd.cdw12 = 0;
1111 
1112 	pt_req.len = spdk_bdev_get_block_size(target->bdev);
1113 	write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1114 	memset(write_buf, 0xA5, pt_req.len);
1115 	pt_req.buf = write_buf;
1116 
1117 	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
1118 	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
1119 	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
1120 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1121 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
1122 
1123 	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
1124 	read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1125 	pt_req.buf = read_buf;
1126 
1127 	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
1128 	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
1129 	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
1130 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1131 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
1132 
1133 	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
1134 	spdk_free(read_buf);
1135 	spdk_free(write_buf);
1136 }
1137 
1138 static void
1139 blockdev_test_nvme_passthru_vendor_specific(void)
1140 {
1141 	struct bdevio_passthrough_request pt_req;
1142 	struct io_target *target;
1143 
1144 	target = g_current_io_target;
1145 
1146 	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
1147 		return;
1148 	}
1149 
1150 	memset(&pt_req, 0, sizeof(pt_req));
1151 	pt_req.target = target;
1152 	pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
1153 	pt_req.cmd.nsid = 1;
1154 
1155 	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
1156 	pt_req.sc = SPDK_NVME_SC_SUCCESS;
1157 	pt_req.cdw0 = 0xbeef;
1158 	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
1159 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1160 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
1161 	CU_ASSERT(pt_req.cdw0 == 0x0);
1162 }
1163 
1164 static void
1165 __blockdev_nvme_admin_passthru(void *arg)
1166 {
1167 	struct bdevio_passthrough_request *pt_req = arg;
1168 	struct io_target *target = pt_req->target;
1169 	int rc;
1170 
1171 	rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
1172 					   &pt_req->cmd, pt_req->buf, pt_req->len,
1173 					   nvme_pt_test_complete, pt_req);
1174 	if (rc) {
1175 		wake_ut_thread();
1176 	}
1177 }
1178 
1179 static void
1180 blockdev_test_nvme_admin_passthru(void)
1181 {
1182 	struct io_target *target;
1183 	struct bdevio_passthrough_request pt_req;
1184 
1185 	target = g_current_io_target;
1186 
1187 	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
1188 		return;
1189 	}
1190 
1191 	memset(&pt_req, 0, sizeof(pt_req));
1192 	pt_req.target = target;
1193 	pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
1194 	pt_req.cmd.nsid = 0;
1195 	*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;
1196 
1197 	pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
1198 	pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1199 
1200 	pt_req.sct = SPDK_NVME_SCT_GENERIC;
1201 	pt_req.sc = SPDK_NVME_SC_SUCCESS;
1202 	execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
1203 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1204 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
1205 }
1206 
1207 static void
1208 blockdev_test_copy(void)
1209 {
1210 	uint32_t data_length;
1211 	uint64_t src_offset, dst_offset;
1212 	struct io_target *target = g_current_io_target;
1213 	struct spdk_bdev *bdev = target->bdev;
1214 	char *tx_buf = NULL;
1215 	char *rx_buf = NULL;
1216 	int rc;
1217 	const uint32_t block_size = spdk_bdev_get_block_size(bdev);
1218 
1219 	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_COPY)) {
1220 		return;
1221 	}
1222 
1223 	data_length = block_size;
1224 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
1225 	src_offset = 0;
1226 	dst_offset = block_size;
1227 
1228 	initialize_buffer(&tx_buf, 0xAA, data_length, block_size);
1229 	initialize_buffer(&rx_buf, 0, data_length, block_size);
1230 
1231 	blockdev_write(target, tx_buf, src_offset, data_length, data_length);
1232 	CU_ASSERT_EQUAL(g_completion_success, true);
1233 
1234 	blockdev_copy(target, dst_offset, src_offset, data_length);
1235 	CU_ASSERT_EQUAL(g_completion_success, true);
1236 
1237 	blockdev_read(target, rx_buf, dst_offset, data_length, data_length);
1238 	CU_ASSERT_EQUAL(g_completion_success, true);
1239 
1240 	rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
1241 	CU_ASSERT_EQUAL(rc, 0);
1242 }
1243 
/* Runs on g_thread_init: tear down targets, then either hand the result back
 * over RPC (when started with -w and not shutting down) or stop the app.
 * 'arg' is the originating JSON-RPC request, or NULL when not RPC-driven. */
static void
__stop_init_thread(void *arg)
{
	unsigned num_failures = g_num_failures;
	struct spdk_jsonrpc_request *request = arg;

	/* Reset the failure counter so a subsequent RPC run starts clean. */
	g_num_failures = 0;

	bdevio_cleanup_targets();
	if (g_wait_for_tests && !g_shutdown) {
		/* Do not stop the app yet, wait for another RPC */
		rpc_perform_tests_cb(num_failures, request);
		return;
	}
	assert(spdk_get_thread() == g_thread_init);
	assert(spdk_thread_is_app_thread(NULL));
	/* Retire the I/O thread before stopping the application. */
	execute_spdk_function(__exit_io_thread, NULL);
	spdk_app_stop(num_failures);
}
1263 
/* Record the test failure count and ask the init (app) thread to finish up.
 * Safe to call from any thread; the actual teardown runs on g_thread_init. */
static void
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	g_num_failures = num_failures;

	spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
}
1271 
1272 static int
1273 suite_init(void)
1274 {
1275 	if (g_current_io_target == NULL) {
1276 		g_current_io_target = g_io_targets;
1277 	}
1278 	return 0;
1279 }
1280 
/* CUnit suite teardown: advance the target cursor so the next per-target
 * suite (one is registered per target in __setup_ut_on_single_target)
 * operates on its own bdev.  Returns 0 (CUnit success). */
static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}
1287 
1288 #define SUITE_NAME_MAX 64
1289 
1290 static int
1291 __setup_ut_on_single_target(struct io_target *target)
1292 {
1293 	unsigned rc = 0;
1294 	CU_pSuite suite = NULL;
1295 	char name[SUITE_NAME_MAX];
1296 
1297 	snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
1298 	suite = CU_add_suite(name, suite_init, suite_fini);
1299 	if (suite == NULL) {
1300 		CU_cleanup_registry();
1301 		rc = CU_get_error();
1302 		return -rc;
1303 	}
1304 
1305 	if (
1306 		CU_add_test(suite, "blockdev write read block",
1307 			    blockdev_write_read_block) == NULL
1308 		|| CU_add_test(suite, "blockdev write zeroes read block",
1309 			       blockdev_write_zeroes_read_block) == NULL
1310 		|| CU_add_test(suite, "blockdev write zeroes read no split",
1311 			       blockdev_write_zeroes_read_no_split) == NULL
1312 		|| CU_add_test(suite, "blockdev write zeroes read split",
1313 			       blockdev_write_zeroes_read_split) == NULL
1314 		|| CU_add_test(suite, "blockdev write zeroes read split partial",
1315 			       blockdev_write_zeroes_read_split_partial) == NULL
1316 		|| CU_add_test(suite, "blockdev reset",
1317 			       blockdev_test_reset) == NULL
1318 		|| CU_add_test(suite, "blockdev write read 8 blocks",
1319 			       blockdev_write_read_8blocks) == NULL
1320 		|| CU_add_test(suite, "blockdev write read size > 128k",
1321 			       blockdev_write_read_size_gt_128k) == NULL
1322 		|| CU_add_test(suite, "blockdev write read invalid size",
1323 			       blockdev_write_read_invalid_size) == NULL
1324 		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
1325 			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
1326 		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
1327 			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
1328 		|| CU_add_test(suite, "blockdev write read max offset",
1329 			       blockdev_write_read_max_offset) == NULL
1330 		|| CU_add_test(suite, "blockdev write read 2 blocks on overlapped address offset",
1331 			       blockdev_overlapped_write_read_2blocks) == NULL
1332 		|| CU_add_test(suite, "blockdev writev readv 8 blocks",
1333 			       blockdev_writev_readv_8blocks) == NULL
1334 		|| CU_add_test(suite, "blockdev writev readv 30 x 1block",
1335 			       blockdev_writev_readv_30x1block) == NULL
1336 		|| CU_add_test(suite, "blockdev writev readv block",
1337 			       blockdev_writev_readv_block) == NULL
1338 		|| CU_add_test(suite, "blockdev writev readv size > 128k",
1339 			       blockdev_writev_readv_size_gt_128k) == NULL
1340 		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
1341 			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
1342 		|| CU_add_test(suite, "blockdev comparev and writev",
1343 			       blockdev_comparev_and_writev) == NULL
1344 		|| CU_add_test(suite, "blockdev nvme passthru rw",
1345 			       blockdev_test_nvme_passthru_rw) == NULL
1346 		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
1347 			       blockdev_test_nvme_passthru_vendor_specific) == NULL
1348 		|| CU_add_test(suite, "blockdev nvme admin passthru",
1349 			       blockdev_test_nvme_admin_passthru) == NULL
1350 		|| CU_add_test(suite, "blockdev copy",
1351 			       blockdev_test_copy) == NULL
1352 	) {
1353 		CU_cleanup_registry();
1354 		rc = CU_get_error();
1355 		return -rc;
1356 	}
1357 	return 0;
1358 }
1359 
1360 static void
1361 __run_ut_thread(void *arg)
1362 {
1363 	struct spdk_jsonrpc_request *request = arg;
1364 	int rc = 0;
1365 	struct io_target *target;
1366 
1367 	if (CU_initialize_registry() != CUE_SUCCESS) {
1368 		/* CUnit error, probably won't recover */
1369 		rc = CU_get_error();
1370 		rc = -rc;
1371 		goto ret;
1372 	}
1373 
1374 	target = g_io_targets;
1375 	while (target != NULL) {
1376 		rc = __setup_ut_on_single_target(target);
1377 		if (rc < 0) {
1378 			/* CUnit error, probably won't recover */
1379 			rc = -rc;
1380 			goto ret;
1381 		}
1382 		target = target->next;
1383 	}
1384 	CU_basic_set_mode(CU_BRM_VERBOSE);
1385 	CU_basic_run_tests();
1386 	rc = CU_get_number_of_failures();
1387 	CU_cleanup_registry();
1388 
1389 ret:
1390 	stop_init_thread(rc, request);
1391 	assert(spdk_get_thread() == g_thread_ut);
1392 	spdk_thread_exit(g_thread_ut);
1393 }
1394 
/* Runs on g_thread_init: open all bdevs as test targets, then kick off the
 * test run on the UT thread.  Stops the app on construction failure. */
static void
__construct_targets(void *arg)
{
	if (bdevio_construct_targets() < 0) {
		spdk_app_stop(-1);
		return;
	}

	/* NULL request: this path is the non-RPC (auto-run) startup. */
	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
}
1405 
1406 static void
1407 test_main(void *arg1)
1408 {
1409 	struct spdk_cpuset tmpmask = {};
1410 	uint32_t i;
1411 
1412 	pthread_mutex_init(&g_test_mutex, NULL);
1413 	pthread_cond_init(&g_test_cond, NULL);
1414 
1415 	/* This test runs specifically on at least three cores.
1416 	 * g_thread_init is the app_thread on main core from event framework.
1417 	 * Next two are only for the tests and should always be on separate CPU cores. */
1418 	if (spdk_env_get_core_count() < 3) {
1419 		spdk_app_stop(-1);
1420 		return;
1421 	}
1422 
1423 	SPDK_ENV_FOREACH_CORE(i) {
1424 		if (i == spdk_env_get_current_core()) {
1425 			g_thread_init = spdk_get_thread();
1426 			continue;
1427 		}
1428 		spdk_cpuset_zero(&tmpmask);
1429 		spdk_cpuset_set_cpu(&tmpmask, i, true);
1430 		if (g_thread_ut == NULL) {
1431 			g_thread_ut = spdk_thread_create("ut_thread", &tmpmask);
1432 		} else if (g_thread_io == NULL) {
1433 			g_thread_io = spdk_thread_create("io_thread", &tmpmask);
1434 		}
1435 
1436 	}
1437 
1438 	if (g_wait_for_tests) {
1439 		/* Do not perform any tests until RPC is received */
1440 		return;
1441 	}
1442 
1443 	spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
1444 }
1445 
/* Print the app-specific command-line options (appended to SPDK usage). */
static void
bdevio_usage(void)
{
	fputs(" -w                        start bdevio app and wait for RPC to start the tests\n",
	      stdout);
}
1451 
1452 static int
1453 bdevio_parse_arg(int ch, char *arg)
1454 {
1455 	switch (ch) {
1456 	case 'w':
1457 		g_wait_for_tests =  true;
1458 		break;
1459 	default:
1460 		return -EINVAL;
1461 	}
1462 	return 0;
1463 }
1464 
/* Decoded parameters of the "perform_tests" RPC. */
struct rpc_perform_tests {
	/* Optional bdev name; NULL means run on all bdevs.  Heap-allocated
	 * by the JSON decoder, released via free_rpc_perform_tests(). */
	char *name;
};
1468 
/* Release heap memory owned by a decoded rpc_perform_tests struct. */
static void
free_rpc_perform_tests(struct rpc_perform_tests *r)
{
	free(r->name);
}
1474 
/* JSON decoder table for the "perform_tests" RPC parameters.
 * The trailing 'true' marks "name" as optional. */
static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};
1478 
1479 static void
1480 rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
1481 {
1482 	struct spdk_json_write_ctx *w;
1483 
1484 	if (num_failures == 0) {
1485 		w = spdk_jsonrpc_begin_result(request);
1486 		spdk_json_write_uint32(w, num_failures);
1487 		spdk_jsonrpc_end_result(request, w);
1488 	} else {
1489 		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
1490 						     "%d test cases failed", num_failures);
1491 	}
1492 }
1493 
/* Handler for the "perform_tests" RPC: construct test targets (for one named
 * bdev, or all bdevs when no name is given) and start the test run on the UT
 * thread.  The request is answered later via rpc_perform_tests_cb() from
 * __stop_init_thread(), or immediately with an error response on failure. */
static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	int rc;

	/* params may legitimately be NULL (no arguments supplied). */
	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		rc = bdevio_construct_target_by_name(req.name);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     req.name, spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	/* Hand the request to the UT thread; it owns the response from here. */
	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);

	return;

invalid:
	free_rpc_perform_tests(&req);
}
/* Register the handler as the runtime "perform_tests" JSON-RPC method. */
SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)
1537 
/* SPDK shutdown callback (e.g. SIGINT): mark shutdown first so
 * __stop_init_thread skips the wait-for-RPC path, then trigger teardown
 * on the init thread. */
static void
spdk_bdevio_shutdown_cb(void)
{
	g_shutdown = true;
	spdk_thread_send_msg(g_thread_init, __stop_init_thread, NULL);
}
1544 
1545 int
1546 main(int argc, char **argv)
1547 {
1548 	int			rc;
1549 	struct spdk_app_opts	opts = {};
1550 
1551 	spdk_app_opts_init(&opts, sizeof(opts));
1552 	opts.name = "bdevio";
1553 	opts.reactor_mask = "0x7";
1554 	opts.shutdown_cb = spdk_bdevio_shutdown_cb;
1555 
1556 	if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
1557 				      bdevio_parse_arg, bdevio_usage)) !=
1558 	    SPDK_APP_PARSE_ARGS_SUCCESS) {
1559 		return rc;
1560 	}
1561 
1562 	rc = spdk_app_start(&opts, test_main, NULL);
1563 	spdk_app_fini();
1564 
1565 	return rc;
1566 }
1567