xref: /spdk/test/bdev/bdevio/bdevio.c (revision 1fc4165fe9bf8512483356ad8e6d27f793f2e3db)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk/bdev.h"
37 #include "spdk/copy_engine.h"
38 #include "spdk/env.h"
39 #include "spdk/log.h"
40 #include "spdk/thread.h"
41 #include "spdk/event.h"
42 
43 #include "CUnit/Basic.h"
44 
45 #define BUFFER_IOVS		1024
46 #define BUFFER_SIZE		260 * 1024
47 #define BDEV_TASK_ARRAY_SIZE	2048
48 
/* Handshake primitives between the UT thread and the I/O lcore: the UT
 * thread parks on g_test_cond in execute_spdk_function() until the I/O
 * side signals completion via wake_ut_thread(). */
pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

/* Three dedicated lcores: init runs the app/cleanup, ut runs the CUnit
 * suite, io submits all bdev I/O. */
static uint32_t g_lcore_id_init;
static uint32_t g_lcore_id_ut;
static uint32_t g_lcore_id_io;

/* One node per leaf bdev under test; singly linked through 'next'. */
struct io_target {
	struct spdk_bdev	*bdev;		/* bdev being exercised */
	struct spdk_bdev_desc	*bdev_desc;	/* open (writable) descriptor */
	struct spdk_io_channel	*ch;		/* channel obtained on the io lcore */
	struct io_target	*next;
};

/* Parameters of one read/write/reset request handed from the UT thread
 * to the io lcore.  iov[]/iovcnt are populated only for the vectored
 * (readv/writev) paths by sgl_chop_buffer(). */
struct bdevio_request {
	char *buf;		/* flat DMA-able payload buffer */
	int data_len;		/* total transfer length in bytes */
	uint64_t offset;	/* byte offset into the bdev */
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;		/* 0 => use the non-vectored API */
	struct io_target *target;
};

/* Head of the target list built by bdevio_construct_targets(). */
struct io_target *g_io_targets = NULL;
73 
/*
 * Run 'fn' on the dedicated I/O lcore and block the calling (UT) thread
 * until the callback chain signals g_test_cond via wake_ut_thread().
 * The mutex is taken BEFORE spdk_event_call() so a completion that
 * fires early cannot be lost: wake_ut_thread() blocks on the mutex
 * until this thread is parked inside pthread_cond_wait().
 */
static void
execute_spdk_function(spdk_event_fn fn, void *arg1, void *arg2)
{
	struct spdk_event *event;

	event = spdk_event_allocate(g_lcore_id_io, fn, arg1, arg2);
	pthread_mutex_lock(&g_test_mutex);
	spdk_event_call(event);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}
85 
/*
 * Called from the I/O lcore to release the UT thread parked in
 * execute_spdk_function().  Taking the mutex first guarantees the
 * signal cannot be delivered before the waiter is in pthread_cond_wait().
 */
static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}
93 
/*
 * Event handler (runs on the I/O lcore): fetch an I/O channel for the
 * target's descriptor.  This is dispatched to the io lcore so the
 * channel belongs to the thread that will actually submit I/O.
 */
static void
__get_io_channel(void *arg1, void *arg2)
{
	struct io_target *target = arg1;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}
103 
/*
 * Iterate all leaf bdevs (per spdk_bdev_first_leaf()/next_leaf()), open
 * each one writable, acquire its I/O channel on the io lcore, and push
 * the resulting io_target onto g_io_targets.  A bdev that fails to open
 * is logged and skipped.  Returns 0 on success, -ENOMEM if a tracking
 * struct cannot be allocated.
 */
static int
bdevio_construct_targets(void)
{
	struct spdk_bdev *bdev;
	struct io_target *target;
	int rc;

	printf("I/O targets:\n");

	bdev = spdk_bdev_first_leaf();
	while (bdev != NULL) {
		uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
		uint32_t block_size = spdk_bdev_get_block_size(bdev);

		target = malloc(sizeof(struct io_target));
		if (target == NULL) {
			return -ENOMEM;
		}

		/* Open failure is non-fatal: skip this bdev and continue. */
		rc = spdk_bdev_open(bdev, true, NULL, NULL, &target->bdev_desc);
		if (rc != 0) {
			free(target);
			SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
			bdev = spdk_bdev_next_leaf(bdev);
			continue;
		}

		printf("  %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
		       spdk_bdev_get_name(bdev),
		       num_blocks, block_size,
		       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

		target->bdev = bdev;
		target->next = g_io_targets;
		/* Channel must be obtained on the lcore that submits I/O. */
		execute_spdk_function(__get_io_channel, target, NULL);
		g_io_targets = target;

		bdev = spdk_bdev_next_leaf(bdev);
	}

	return 0;
}
146 
/*
 * Event handler (I/O lcore): release the channel acquired by
 * __get_io_channel(), then unblock the waiting thread.
 */
static void
__put_io_channel(void *arg1, void *arg2)
{
	struct io_target *target = arg1;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}
155 
/*
 * Tear down everything built by bdevio_construct_targets(): release
 * each target's I/O channel on the io lcore, close its descriptor, and
 * free the list nodes, leaving g_io_targets empty.
 */
static void
bdevio_cleanup_targets(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		execute_spdk_function(__put_io_channel, target, NULL);
		spdk_bdev_close(target->bdev_desc);
		g_io_targets = target->next;
		free(target);
		target = g_io_targets;
	}
}
170 
171 static bool g_completion_success;
172 
173 static void
174 initialize_buffer(char **buf, int pattern, int size)
175 {
176 	*buf = spdk_dma_zmalloc(size, 0x1000, NULL);
177 	memset(*buf, pattern, size);
178 }
179 
/*
 * Generic bdev I/O completion callback: record success/failure in the
 * global flag, free the bdev_io, and unblock the UT thread.
 */
static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}
187 
/*
 * Event handler (I/O lcore): submit the write described by 'req'.
 * Uses the vectored API when sgl_chop_buffer() built an iovec list,
 * otherwise the flat-buffer API.  If submission itself fails, no
 * completion callback will fire, so record failure and wake the UT
 * thread directly.
 */
static void
__blockdev_write(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
208 
/*
 * Event handler (I/O lcore): submit a write-zeroes for 'req'.  On
 * submission error no completion will arrive, so record failure and
 * wake the UT thread directly.
 */
static void
__blockdev_write_zeroes(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
223 
224 static void
225 sgl_chop_buffer(struct bdevio_request *req, int iov_len)
226 {
227 	int data_len = req->data_len;
228 	char *buf = req->buf;
229 
230 	req->iovcnt = 0;
231 	if (!iov_len) {
232 		return;
233 	}
234 
235 	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
236 		if (data_len < iov_len) {
237 			iov_len = data_len;
238 		}
239 
240 		req->iov[req->iovcnt].iov_base = buf;
241 		req->iov[req->iovcnt].iov_len = iov_len;
242 
243 		buf += iov_len;
244 		data_len -= iov_len;
245 	}
246 
247 	CU_ASSERT_EQUAL_FATAL(data_len, 0);
248 }
249 
250 static void
251 blockdev_write(struct io_target *target, char *tx_buf,
252 	       uint64_t offset, int data_len, int iov_len)
253 {
254 	struct bdevio_request req;
255 
256 	req.target = target;
257 	req.buf = tx_buf;
258 	req.data_len = data_len;
259 	req.offset = offset;
260 	sgl_chop_buffer(&req, iov_len);
261 
262 	g_completion_success = false;
263 
264 	execute_spdk_function(__blockdev_write, &req, NULL);
265 }
266 
267 static void
268 blockdev_write_zeroes(struct io_target *target, char *tx_buf,
269 		      uint64_t offset, int data_len)
270 {
271 	struct bdevio_request req;
272 
273 	req.target = target;
274 	req.buf = tx_buf;
275 	req.data_len = data_len;
276 	req.offset = offset;
277 
278 	g_completion_success = false;
279 
280 	execute_spdk_function(__blockdev_write_zeroes, &req, NULL);
281 }
282 
/*
 * Event handler (I/O lcore): submit the read described by 'req',
 * vectored or flat depending on iovcnt.  On submission error no
 * completion will arrive, so record failure and wake the UT thread.
 */
static void
__blockdev_read(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
303 
304 static void
305 blockdev_read(struct io_target *target, char *rx_buf,
306 	      uint64_t offset, int data_len, int iov_len)
307 {
308 	struct bdevio_request req;
309 
310 	req.target = target;
311 	req.buf = rx_buf;
312 	req.data_len = data_len;
313 	req.offset = offset;
314 	req.iovcnt = 0;
315 	sgl_chop_buffer(&req, iov_len);
316 
317 	g_completion_success = false;
318 
319 	execute_spdk_function(__blockdev_read, &req, NULL);
320 }
321 
/*
 * Compare the read-back buffer against the written buffer, then free
 * both DMA buffers (this helper takes ownership of them).  Returns the
 * memcmp() result: 0 on match.
 */
static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int match = memcmp(rx_buf, tx_buf, data_length);

	spdk_dma_free(rx_buf);
	spdk_dma_free(tx_buf);

	return match;
}
333 
334 static void
335 blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
336 		    int expected_rc, bool write_zeroes)
337 {
338 	struct io_target *target;
339 	char	*tx_buf = NULL;
340 	char	*rx_buf = NULL;
341 	int	rc;
342 
343 	target = g_io_targets;
344 	while (target != NULL) {
345 		if (data_length < spdk_bdev_get_block_size(target->bdev) ||
346 		    data_length / spdk_bdev_get_block_size(target->bdev) > spdk_bdev_get_num_blocks(target->bdev)) {
347 			target = target->next;
348 			continue;
349 		}
350 
351 		if (!write_zeroes) {
352 			initialize_buffer(&tx_buf, pattern, data_length);
353 			initialize_buffer(&rx_buf, 0, data_length);
354 
355 			blockdev_write(target, tx_buf, offset, data_length, iov_len);
356 		} else {
357 			initialize_buffer(&tx_buf, 0, data_length);
358 			initialize_buffer(&rx_buf, pattern, data_length);
359 
360 			blockdev_write_zeroes(target, tx_buf, offset, data_length);
361 		}
362 
363 
364 		if (expected_rc == 0) {
365 			CU_ASSERT_EQUAL(g_completion_success, true);
366 		} else {
367 			CU_ASSERT_EQUAL(g_completion_success, false);
368 		}
369 		blockdev_read(target, rx_buf, offset, data_length, iov_len);
370 
371 		if (expected_rc == 0) {
372 			CU_ASSERT_EQUAL(g_completion_success, true);
373 		} else {
374 			CU_ASSERT_EQUAL(g_completion_success, false);
375 		}
376 
377 		if (g_completion_success) {
378 			rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
379 			/* Assert the write by comparing it with values read
380 			 * from each blockdev */
381 			CU_ASSERT_EQUAL(rc, 0);
382 		}
383 
384 		target = target->next;
385 	}
386 }
387 
388 static void
389 blockdev_write_read_4k(void)
390 {
391 	uint32_t data_length;
392 	uint64_t offset;
393 	int pattern;
394 	int expected_rc;
395 
396 	/* Data size = 4K */
397 	data_length = 4096;
398 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
399 	offset = 0;
400 	pattern = 0xA3;
401 	/* Params are valid, hence the expected return value
402 	 * of write and read for all blockdevs is 0. */
403 	expected_rc = 0;
404 
405 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
406 }
407 
static void
blockdev_write_zeroes_read_4k(void)
{
	/* 4 KiB write-zeroes followed by read-back at offset 0; valid
	 * parameters, so both phases must succeed on every blockdev. */
	blockdev_write_read(4096, 0, 0xA3, 0, 0, 1);
}
426 
/*
 * This i/o will not have to split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_1m(void)
{
	/* 1 MiB write-zeroes at offset 0; valid parameters => success
	 * expected on every blockdev. */
	blockdev_write_read(1 * 1024 * 1024, 0, 0xA3, 0, 0, 1);
}
448 
/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_3m(void)
{
	/* 3 MiB write-zeroes at offset 0; valid parameters => success. */
	blockdev_write_read(3 * 1024 * 1024, 0, 0xA3, 0, 0, 1);
}
471 
/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.  It also
 * tests a write size that is not an even multiple of
 * the bdev layer zero buffer size.
 */
static void
blockdev_write_zeroes_read_3m_500k(void)
{
	/* 3.5 MiB (3670016 bytes) write-zeroes at offset 0; valid
	 * parameters => success. */
	blockdev_write_read(3670016, 0, 0xA3, 0, 0, 1);
}
496 
497 static void
498 blockdev_writev_readv_4k(void)
499 {
500 	uint32_t data_length, iov_len;
501 	uint64_t offset;
502 	int pattern;
503 	int expected_rc;
504 
505 	/* Data size = 4K */
506 	data_length = 4096;
507 	iov_len = 4096;
508 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
509 	offset = 0;
510 	pattern = 0xA3;
511 	/* Params are valid, hence the expected return value
512 	 * of write and read for all blockdevs is 0. */
513 	expected_rc = 0;
514 
515 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
516 }
517 
518 static void
519 blockdev_writev_readv_30x4k(void)
520 {
521 	uint32_t data_length, iov_len;
522 	uint64_t offset;
523 	int pattern;
524 	int expected_rc;
525 
526 	/* Data size = 4K */
527 	data_length = 4096 * 30;
528 	iov_len = 4096;
529 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
530 	offset = 0;
531 	pattern = 0xA3;
532 	/* Params are valid, hence the expected return value
533 	 * of write and read for all blockdevs is 0. */
534 	expected_rc = 0;
535 
536 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
537 }
538 
539 static void
540 blockdev_write_read_512Bytes(void)
541 {
542 	uint32_t data_length;
543 	uint64_t offset;
544 	int pattern;
545 	int expected_rc;
546 
547 	/* Data size = 512 */
548 	data_length = 512;
549 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
550 	offset = 8192;
551 	pattern = 0xA3;
552 	/* Params are valid, hence the expected return value
553 	 * of write and read for all blockdevs is 0. */
554 	expected_rc = 0;
555 
556 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
557 }
558 
559 static void
560 blockdev_writev_readv_512Bytes(void)
561 {
562 	uint32_t data_length, iov_len;
563 	uint64_t offset;
564 	int pattern;
565 	int expected_rc;
566 
567 	/* Data size = 512 */
568 	data_length = 512;
569 	iov_len = 512;
570 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
571 	offset = 8192;
572 	pattern = 0xA3;
573 	/* Params are valid, hence the expected return value
574 	 * of write and read for all blockdevs is 0. */
575 	expected_rc = 0;
576 
577 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
578 }
579 
580 static void
581 blockdev_write_read_size_gt_128k(void)
582 {
583 	uint32_t data_length;
584 	uint64_t offset;
585 	int pattern;
586 	int expected_rc;
587 
588 	/* Data size = 132K */
589 	data_length = 135168;
590 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
591 	offset = 8192;
592 	pattern = 0xA3;
593 	/* Params are valid, hence the expected return value
594 	 * of write and read for all blockdevs is 0. */
595 	expected_rc = 0;
596 
597 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
598 }
599 
600 static void
601 blockdev_writev_readv_size_gt_128k(void)
602 {
603 	uint32_t data_length, iov_len;
604 	uint64_t offset;
605 	int pattern;
606 	int expected_rc;
607 
608 	/* Data size = 132K */
609 	data_length = 135168;
610 	iov_len = 135168;
611 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
612 	offset = 8192;
613 	pattern = 0xA3;
614 	/* Params are valid, hence the expected return value
615 	 * of write and read for all blockdevs is 0. */
616 	expected_rc = 0;
617 
618 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
619 }
620 
621 static void
622 blockdev_writev_readv_size_gt_128k_two_iov(void)
623 {
624 	uint32_t data_length, iov_len;
625 	uint64_t offset;
626 	int pattern;
627 	int expected_rc;
628 
629 	/* Data size = 132K */
630 	data_length = 135168;
631 	iov_len = 128 * 1024;
632 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
633 	offset = 8192;
634 	pattern = 0xA3;
635 	/* Params are valid, hence the expected return value
636 	 * of write and read for all blockdevs is 0. */
637 	expected_rc = 0;
638 
639 	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
640 }
641 
642 static void
643 blockdev_write_read_invalid_size(void)
644 {
645 	uint32_t data_length;
646 	uint64_t offset;
647 	int pattern;
648 	int expected_rc;
649 
650 	/* Data size is not a multiple of the block size */
651 	data_length = 0x1015;
652 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
653 	offset = 8192;
654 	pattern = 0xA3;
655 	/* Params are invalid, hence the expected return value
656 	 * of write and read for all blockdevs is < 0 */
657 	expected_rc = -1;
658 
659 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
660 }
661 
/*
 * Write and read back the very last block of each bdev, i.e. the
 * boundary case where offset + nbytes == total bdev size.  Both I/Os
 * must succeed and the data must match.
 */
static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char	*tx_buf = NULL;
	char	*rx_buf = NULL;
	uint64_t offset;
	uint32_t block_size;
	int rc;

	target = g_io_targets;
	while (target != NULL) {
		bdev = target->bdev;

		block_size = spdk_bdev_get_block_size(bdev);

		/* The start offset has been set to a marginal value
		 * such that offset + nbytes == Total size of
		 * blockdev. */
		offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);

		initialize_buffer(&tx_buf, 0xA3, block_size);
		initialize_buffer(&rx_buf, 0, block_size);

		blockdev_write(target, tx_buf, offset, block_size, 0);
		CU_ASSERT_EQUAL(g_completion_success, true);

		blockdev_read(target, rx_buf, offset, block_size, 0);
		CU_ASSERT_EQUAL(g_completion_success, true);

		/* The match helper frees both DMA buffers. */
		rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
		/* Assert the write by comparing it with values read
		 * from each blockdev */
		CU_ASSERT_EQUAL(rc, 0);

		target = target->next;
	}
}
701 
702 static void
703 blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
704 {
705 	struct io_target *target;
706 	struct spdk_bdev *bdev;
707 	char	*tx_buf = NULL;
708 	char	*rx_buf = NULL;
709 	int	data_length;
710 	uint64_t offset;
711 	int pattern;
712 
713 	/* Tests the overflow condition of the blockdevs. */
714 	data_length = 4096;
715 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
716 	pattern = 0xA3;
717 
718 	target = g_io_targets;
719 	while (target != NULL) {
720 		bdev = target->bdev;
721 
722 		/* The start offset has been set to a valid value
723 		 * but offset + nbytes is greater than the Total size
724 		 * of the blockdev. The test should fail. */
725 		offset = ((spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev)) - 1024);
726 
727 		initialize_buffer(&tx_buf, pattern, data_length);
728 		initialize_buffer(&rx_buf, 0, data_length);
729 
730 		blockdev_write(target, tx_buf, offset, data_length, 0);
731 		CU_ASSERT_EQUAL(g_completion_success, false);
732 
733 		blockdev_read(target, rx_buf, offset, data_length, 0);
734 		CU_ASSERT_EQUAL(g_completion_success, false);
735 
736 		target = target->next;
737 	}
738 }
739 
740 static void
741 blockdev_write_read_max_offset(void)
742 {
743 	int	data_length;
744 	uint64_t offset;
745 	int pattern;
746 	int expected_rc;
747 
748 	data_length = 4096;
749 	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
750 	/* The start offset has been set to UINT64_MAX such that
751 	 * adding nbytes wraps around and points to an invalid address. */
752 	offset = UINT64_MAX;
753 	pattern = 0xA3;
754 	/* Params are invalid, hence the expected return value
755 	 * of write and read for all blockdevs is < 0 */
756 	expected_rc = -1;
757 
758 	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
759 }
760 
/*
 * Write an 8 KiB pattern at offset 0, then overwrite the overlapping
 * range starting at offset 4096 with a different pattern, verifying
 * each write by read-back.
 */
static void
blockdev_overlapped_write_read_8k(void)
{
	int	data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 8K */
	data_length = 8192;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);

	/* Overwrite the pattern 0xbb of size 8K on an address offset overlapping
	 * with the address written above and assert the new value in
	 * the overlapped address range */
	/* Populate 8k with value 0xBB */
	pattern = 0xBB;
	/* Offset = 4096; overlap the [4096, 8192) half of the first
	 * write (previous comment said 6144, which did not match the code) */
	offset = 4096;
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}
792 
/*
 * Event handler (I/O lcore): submit a bdev reset.  On submission error
 * no completion will arrive, so record failure and wake the UT thread.
 */
static void
__blockdev_reset(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
806 
807 static void
808 blockdev_reset(struct io_target *target)
809 {
810 	struct bdevio_request req;
811 
812 	req.target = target;
813 
814 	g_completion_success = false;
815 
816 	execute_spdk_function(__blockdev_reset, &req, NULL);
817 }
818 
819 static void
820 blockdev_test_reset(void)
821 {
822 	struct io_target	*target;
823 
824 	target = g_io_targets;
825 	while (target != NULL) {
826 		blockdev_reset(target);
827 		CU_ASSERT_EQUAL(g_completion_success, true);
828 
829 		target = target->next;
830 	}
831 }
832 
/*
 * Event handler (init lcore): tear down all targets, then stop the app,
 * using the CUnit failure count (smuggled through arg1) as exit status.
 */
static void
__stop_init_thread(void *arg1, void *arg2)
{
	unsigned num_failures = (unsigned)(uintptr_t)arg1;

	bdevio_cleanup_targets();
	spdk_app_stop(num_failures);
}
841 
/*
 * Bounce 'num_failures' over to the init lcore, where target cleanup
 * and spdk_app_stop() are performed (see __stop_init_thread()).
 */
static void
stop_init_thread(unsigned num_failures)
{
	struct spdk_event *event;

	event = spdk_event_allocate(g_lcore_id_init, __stop_init_thread,
				    (void *)(uintptr_t)num_failures, NULL);
	spdk_event_call(event);
}
851 
/*
 * Event handler (UT lcore): register and run the full CUnit suite,
 * then hand the failure count back to the init lcore for cleanup and
 * app shutdown.  Any registry/suite setup error aborts via
 * stop_init_thread() with the CUnit error code.
 */
static void
__run_ut_thread(void *arg1, void *arg2)
{
	CU_pSuite suite = NULL;
	unsigned num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		stop_init_thread(CU_get_error());
		return;
	}

	suite = CU_add_suite("components_suite", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		stop_init_thread(CU_get_error());
		return;
	}

	if (
		CU_add_test(suite, "blockdev write read 4k", blockdev_write_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3.5m", blockdev_write_zeroes_read_3m_500k) == NULL
		|| CU_add_test(suite, "blockdev write read 512 bytes",
			       blockdev_write_read_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev write read size > 128k",
			       blockdev_write_read_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev write read invalid size",
			       blockdev_write_read_invalid_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read max offset",
			       blockdev_write_read_max_offset) == NULL
		|| CU_add_test(suite, "blockdev write read 8k on overlapped address offset",
			       blockdev_overlapped_write_read_8k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 4k", blockdev_writev_readv_4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 30 x 4k",
			       blockdev_writev_readv_30x4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 512 bytes",
			       blockdev_writev_readv_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k",
			       blockdev_writev_readv_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
		|| CU_add_test(suite, "blockdev reset",
			       blockdev_test_reset) == NULL
	) {
		CU_cleanup_registry();
		stop_init_thread(CU_get_error());
		return;
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	stop_init_thread(num_failures);
}
913 
914 static void
915 test_main(void *arg1, void *arg2)
916 {
917 	struct spdk_event *event;
918 
919 	pthread_mutex_init(&g_test_mutex, NULL);
920 	pthread_cond_init(&g_test_cond, NULL);
921 
922 	g_lcore_id_init = spdk_env_get_first_core();
923 	g_lcore_id_ut = spdk_env_get_next_core(g_lcore_id_init);
924 	g_lcore_id_io = spdk_env_get_next_core(g_lcore_id_ut);
925 
926 	if (g_lcore_id_init == SPDK_ENV_LCORE_ID_ANY ||
927 	    g_lcore_id_ut == SPDK_ENV_LCORE_ID_ANY ||
928 	    g_lcore_id_io == SPDK_ENV_LCORE_ID_ANY) {
929 		SPDK_ERRLOG("Could not reserve 3 separate threads.\n");
930 		spdk_app_stop(-1);
931 	}
932 
933 	if (bdevio_construct_targets() < 0) {
934 		spdk_app_stop(-1);
935 		return;
936 	}
937 
938 	event = spdk_event_allocate(g_lcore_id_ut, __run_ut_thread, NULL, NULL);
939 	spdk_event_call(event);
940 }
941 
/* Usage callback for spdk_app_parse_args(): bdevio defines no extra
 * command-line options, so there is nothing to print. */
static void
bdevio_usage(void)
{
}
946 
/* Option callback for spdk_app_parse_args(): the optstring passed in
 * main() is "", so no app-specific option ever reaches here; return 0
 * (success) unconditionally. */
static int
bdevio_parse_arg(int ch, char *arg)
{
	return 0;
}
952 
953 int
954 main(int argc, char **argv)
955 {
956 	int			rc;
957 	struct spdk_app_opts	opts = {};
958 
959 	spdk_app_opts_init(&opts);
960 	opts.name = "bdevio";
961 	opts.rpc_addr = NULL;
962 	opts.reactor_mask = "0x7";
963 
964 	if ((rc = spdk_app_parse_args(argc, argv, &opts, "", NULL,
965 				      bdevio_parse_arg, bdevio_usage)) !=
966 	    SPDK_APP_PARSE_ARGS_SUCCESS) {
967 		return rc;
968 	}
969 
970 	rc = spdk_app_start(&opts, test_main, NULL);
971 	spdk_app_fini();
972 
973 	return rc;
974 }
975