/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/accel.h"
#include "spdk/env.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/event.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "bdev_internal.h"
#include "CUnit/Basic.h"

#define BUFFER_IOVS		1024
#define BUFFER_SIZE		260 * 1024
#define BDEV_TASK_ARRAY_SIZE	2048

pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

static struct spdk_thread *g_thread_init;
static struct spdk_thread *g_thread_ut;
static struct spdk_thread *g_thread_io;
static bool g_wait_for_tests = false;
static int g_num_failures = 0;
static bool g_shutdown = false;

struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;
	struct io_target	*next;
};

struct bdevio_request {
	char *buf;
	char *fused_buf;
	int data_len;
	uint64_t offset;
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;
	struct iovec fused_iov[BUFFER_IOVS];
	int fused_iovcnt;
	struct io_target *target;
	uint64_t src_offset;
};

struct io_target *g_io_targets = NULL;
struct io_target *g_current_io_target = NULL;
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);

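/*
 * Tests run on the UT thread while all bdev I/O is submitted from the dedicated
 * I/O thread.  execute_spdk_function() sends a message to the I/O thread and
 * blocks the caller on g_test_cond until the dispatched function (or its
 * completion callback) calls wake_ut_thread().
 */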
static void
execute_spdk_function(spdk_msg_fn fn, void *arg)
{
	pthread_mutex_lock(&g_test_mutex);
	spdk_thread_send_msg(g_thread_io, fn, arg);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
__exit_io_thread(void *arg)
{
	assert(spdk_get_thread() == g_thread_io);
	spdk_thread_exit(g_thread_io);
	wake_ut_thread();
}

static void
__get_io_channel(void *arg)
{
	struct io_target *target = arg;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}

static void
bdevio_construct_target_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				void *event_ctx)
{
}

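/*
 * Open the named bdev, record its geometry, and put it at the head of the
 * g_io_targets list.  The I/O channel is acquired on the I/O thread via
 * execute_spdk_function() so that all subsequent I/O uses that channel.
 */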
static int
bdevio_construct_target_by_name(const char *bdev_name)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	uint64_t num_blocks;
	uint32_t block_size;
	int rc;

	target = malloc(sizeof(struct io_target));
	if (target == NULL) {
		return -ENOMEM;
	}

	rc = spdk_bdev_open_ext(bdev_name, true, bdevio_construct_target_open_cb, NULL,
				&target->bdev_desc);
	if (rc != 0) {
		free(target);
		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", bdev_name, rc);
		return rc;
	}

	bdev = spdk_bdev_desc_get_bdev(target->bdev_desc);

	num_blocks = spdk_bdev_get_num_blocks(bdev);
	block_size = spdk_bdev_get_block_size(bdev);

	printf("  %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
	       bdev_name, num_blocks, block_size,
	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

	target->bdev = bdev;
	target->next = g_io_targets;
	execute_spdk_function(__get_io_channel, target);
	g_io_targets = target;

	return 0;
}

static int
bdevio_construct_target(void *ctx, struct spdk_bdev *bdev)
{
	const char *bdev_name = spdk_bdev_get_name(bdev);

	return bdevio_construct_target_by_name(bdev_name);
}

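/*
 * Build an io_target for each bdev enumerated by spdk_for_each_bdev_leaf()
 * and fail if none were found.
 */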
static int
bdevio_construct_targets(void)
{
	int rc;

	printf("I/O targets:\n");

	rc = spdk_for_each_bdev_leaf(NULL, bdevio_construct_target);
	if (rc < 0) {
		SPDK_ERRLOG("Could not complete constructing bdevs, error=%d\n", rc);
		return rc;
	}

	if (g_io_targets == NULL) {
		SPDK_ERRLOG("No bdevs to perform tests on\n");
		return -1;
	}

	return 0;
}

static void
__put_io_channel(void *arg)
{
	struct io_target *target = arg;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}

static void
bdevio_cleanup_targets(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		execute_spdk_function(__put_io_channel, target);
		spdk_bdev_close(target->bdev_desc);
		g_io_targets = target->next;
		free(target);
		target = g_io_targets;
	}
}

static bool g_completion_success;

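/*
 * Allocate a 4 KiB-aligned DMA-able buffer and fill it with the given pattern.
 * When a non-zero pattern is used, the first byte of each block is additionally
 * overwritten with the block index, which helps catch I/O that lands on the
 * wrong block.
 */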
195f9d5ed28SSeth Howell static void
initialize_buffer(char ** buf,int pattern,int size,uint32_t block_size)19697455abeSDenis Barakhtanov initialize_buffer(char **buf, int pattern, int size, uint32_t block_size)
197f9d5ed28SSeth Howell {
19897455abeSDenis Barakhtanov CU_ASSERT(block_size != 0);
19997455abeSDenis Barakhtanov
20095ab4194SDarek Stojaczyk *buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
201f9d5ed28SSeth Howell memset(*buf, pattern, size);
20297455abeSDenis Barakhtanov
20397455abeSDenis Barakhtanov if (pattern) {
20497455abeSDenis Barakhtanov for (int offset = 0, block = 0; offset < size; offset += block_size, block++) {
20597455abeSDenis Barakhtanov *(*buf + offset) = block;
20697455abeSDenis Barakhtanov }
20797455abeSDenis Barakhtanov }
208f9d5ed28SSeth Howell }
209f9d5ed28SSeth Howell
210f9d5ed28SSeth Howell static void
quick_test_complete(struct spdk_bdev_io * bdev_io,bool success,void * arg)211f9d5ed28SSeth Howell quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
212f9d5ed28SSeth Howell {
213f9d5ed28SSeth Howell g_completion_success = success;
214f9d5ed28SSeth Howell spdk_bdev_free_io(bdev_io);
215f9d5ed28SSeth Howell wake_ut_thread();
216f9d5ed28SSeth Howell }
217f9d5ed28SSeth Howell
218a6e02246SJonas Pfefferle static uint64_t
bdev_bytes_to_blocks(struct spdk_bdev * bdev,uint64_t bytes)219a6e02246SJonas Pfefferle bdev_bytes_to_blocks(struct spdk_bdev *bdev, uint64_t bytes)
220a6e02246SJonas Pfefferle {
221a6e02246SJonas Pfefferle uint32_t block_size = spdk_bdev_get_block_size(bdev);
222a6e02246SJonas Pfefferle
223a6e02246SJonas Pfefferle CU_ASSERT(bytes % block_size == 0);
224a6e02246SJonas Pfefferle return bytes / block_size;
225a6e02246SJonas Pfefferle }
226a6e02246SJonas Pfefferle
227f9d5ed28SSeth Howell static void
__blockdev_write(void * arg)228f23e89c5SBen Walker __blockdev_write(void *arg)
229f9d5ed28SSeth Howell {
230f23e89c5SBen Walker struct bdevio_request *req = arg;
231f9d5ed28SSeth Howell struct io_target *target = req->target;
232f9d5ed28SSeth Howell int rc;
233f9d5ed28SSeth Howell
234f9d5ed28SSeth Howell if (req->iovcnt) {
235f9d5ed28SSeth Howell rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
236f9d5ed28SSeth Howell req->data_len, quick_test_complete, NULL);
237f9d5ed28SSeth Howell } else {
238f9d5ed28SSeth Howell rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
239f9d5ed28SSeth Howell req->data_len, quick_test_complete, NULL);
240f9d5ed28SSeth Howell }
241f9d5ed28SSeth Howell
242f9d5ed28SSeth Howell if (rc) {
243f9d5ed28SSeth Howell g_completion_success = false;
244f9d5ed28SSeth Howell wake_ut_thread();
245f9d5ed28SSeth Howell }
246f9d5ed28SSeth Howell }
247f9d5ed28SSeth Howell
248f9d5ed28SSeth Howell static void
__blockdev_write_zeroes(void * arg)249f23e89c5SBen Walker __blockdev_write_zeroes(void *arg)
250f9d5ed28SSeth Howell {
251f23e89c5SBen Walker struct bdevio_request *req = arg;
252f9d5ed28SSeth Howell struct io_target *target = req->target;
253f9d5ed28SSeth Howell int rc;
254f9d5ed28SSeth Howell
255f9d5ed28SSeth Howell rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
256f9d5ed28SSeth Howell req->data_len, quick_test_complete, NULL);
257f9d5ed28SSeth Howell if (rc) {
258f9d5ed28SSeth Howell g_completion_success = false;
259f9d5ed28SSeth Howell wake_ut_thread();
260f9d5ed28SSeth Howell }
261f9d5ed28SSeth Howell }
262f9d5ed28SSeth Howell
263f9d5ed28SSeth Howell static void
__blockdev_compare_and_write(void * arg)264931ac757SMaciej Szwed __blockdev_compare_and_write(void *arg)
265931ac757SMaciej Szwed {
266931ac757SMaciej Szwed struct bdevio_request *req = arg;
267931ac757SMaciej Szwed struct io_target *target = req->target;
268a6e02246SJonas Pfefferle struct spdk_bdev *bdev = target->bdev;
269931ac757SMaciej Szwed int rc;
270931ac757SMaciej Szwed
271931ac757SMaciej Szwed rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt,
272a6e02246SJonas Pfefferle req->fused_iov, req->fused_iovcnt, bdev_bytes_to_blocks(bdev, req->offset),
273a6e02246SJonas Pfefferle bdev_bytes_to_blocks(bdev, req->data_len), quick_test_complete, NULL);
274931ac757SMaciej Szwed
275931ac757SMaciej Szwed if (rc) {
276931ac757SMaciej Szwed g_completion_success = false;
277931ac757SMaciej Szwed wake_ut_thread();
278931ac757SMaciej Szwed }
279931ac757SMaciej Szwed }
280931ac757SMaciej Szwed
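/*
 * Split req->buf (or req->fused_buf in the _fused variant below) into iovec
 * entries of at most iov_len bytes each.  iov_len == 0 leaves the iovec count
 * at zero, so the request is submitted through the non-vectored API instead.
 */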
static void
sgl_chop_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->buf;

	req->iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->iov[req->iovcnt].iov_base = buf;
		req->iov[req->iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->fused_buf;

	req->fused_iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->fused_iov[req->fused_iovcnt].iov_base = buf;
		req->fused_iov[req->fused_iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
blockdev_write(struct io_target *target, char *tx_buf,
	       uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_write, &req);
}

static void
_blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf,
			    uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = cmp_buf;
	req.fused_buf = write_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);
	sgl_chop_fused_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_compare_and_write, &req);
}

static void
blockdev_write_zeroes(struct io_target *target, char *tx_buf,
		      uint64_t offset, int data_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;

	g_completion_success = false;

	execute_spdk_function(__blockdev_write_zeroes, &req);
}

static void
__blockdev_read(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_read(struct io_target *target, char *rx_buf,
	      uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = rx_buf;
	req.data_len = data_len;
	req.offset = offset;
	req.iovcnt = 0;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_read, &req);
}

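/*
 * Submit a copy command from src_offset to the destination offset carried in
 * req->offset; all byte offsets and the length are converted to block counts
 * before calling spdk_bdev_copy_blocks().
 */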
static void
_blockdev_copy(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	struct spdk_bdev *bdev = target->bdev;
	int rc;

	rc = spdk_bdev_copy_blocks(target->bdev_desc, target->ch,
				   bdev_bytes_to_blocks(bdev, req->offset),
				   bdev_bytes_to_blocks(bdev, req->src_offset),
				   bdev_bytes_to_blocks(bdev, req->data_len),
				   quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_copy(struct io_target *target, uint64_t dst_offset, uint64_t src_offset, int data_len)
{
	struct bdevio_request req;

	req.target = target;
	req.data_len = data_len;
	req.offset = dst_offset;
	req.src_offset = src_offset;

	g_completion_success = false;

	execute_spdk_function(_blockdev_copy, &req);
}

static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	return memcmp(rx_buf, tx_buf, data_length);
}

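/*
 * Write data_length bytes of the given pattern at offset on the current
 * target, read it back, and verify the contents.  If the bdev has a write
 * unit size larger than one block, the write span is widened to a write-unit
 * aligned range while the read-back still covers only the requested range.
 * expected_rc == 0 means both the write (or write_zeroes) and the read are
 * expected to complete successfully; any other value means they must fail.
 */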
static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes, uint32_t block_size)
{
	struct io_target *target;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	int rc;
	uint64_t write_offset = offset;
	uint32_t write_data_len = data_length;

	target = g_current_io_target;

	if (spdk_bdev_get_write_unit_size(target->bdev) > 1 && expected_rc == 0) {
		uint32_t write_unit_bytes;

		write_unit_bytes = spdk_bdev_get_write_unit_size(target->bdev) *
				   spdk_bdev_get_block_size(target->bdev);
		write_offset -= offset % write_unit_bytes;
		write_data_len += (offset - write_offset);

		if (write_data_len % write_unit_bytes) {
			write_data_len += write_unit_bytes - write_data_len % write_unit_bytes;
		}
	}

	if (!write_zeroes) {
		initialize_buffer(&tx_buf, pattern, write_data_len, block_size);
		initialize_buffer(&rx_buf, 0, data_length, block_size);

		blockdev_write(target, tx_buf, write_offset, write_data_len, iov_len);
	} else {
		initialize_buffer(&tx_buf, 0, write_data_len, block_size);
		initialize_buffer(&rx_buf, pattern, data_length, block_size);

		blockdev_write_zeroes(target, tx_buf, write_offset, write_data_len);
	}

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}
	blockdev_read(target, rx_buf, offset, data_length, iov_len);

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	if (g_completion_success) {
		rc = blockdev_write_read_data_match(rx_buf, tx_buf + (offset - write_offset), data_length);
		/* Assert the write by comparing it with values read
		 * from each blockdev */
		CU_ASSERT_EQUAL(rc, 0);
	}

	spdk_free(rx_buf);
	spdk_free(tx_buf);
}

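/*
 * Write a 0xAA pattern, then issue two compare-and-write commands that compare
 * against that pattern and write 0xBB.  The first must succeed, the second
 * must fail because the on-disk data no longer matches, and a final read must
 * return the 0xBB data written by the first command.
 */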
static void
blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset)
{
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	char *tx_buf = NULL;
	char *write_buf = NULL;
	char *rx_buf = NULL;
	int rc;
	const uint32_t block_size = spdk_bdev_get_block_size(bdev);

	initialize_buffer(&tx_buf, 0xAA, data_length, block_size);
	initialize_buffer(&rx_buf, 0, data_length, block_size);
	initialize_buffer(&write_buf, 0xBB, data_length, block_size);

	blockdev_write(target, tx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);
	rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);

	spdk_free(rx_buf);
	spdk_free(tx_buf);
	spdk_free(write_buf);
}

static void
blockdev_write_read_block(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	const uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = 1 block */
	data_length = block_size;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
}

static void
blockdev_write_zeroes_read_block(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	const uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = 1 block */
	data_length = block_size;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1, block_size);
}

/*
 * This i/o will not have to split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_no_split(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	const uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned ZERO_BUFFER_SIZE */
	data_length = ZERO_BUFFER_SIZE; /* from bdev_internal.h */
	data_length -= ZERO_BUFFER_SIZE % block_size;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1, block_size);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_split(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	const uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 3 * ZERO_BUFFER_SIZE */
	data_length = 3 * ZERO_BUFFER_SIZE; /* from bdev_internal.h */
	data_length -= data_length % block_size;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1, block_size);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev. It also
 * tests a write size that is not an even multiple of
 * the bdev layer zero buffer size.
 */
static void
blockdev_write_zeroes_read_split_partial(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 7 * ZERO_BUFFER_SIZE / 2 */
	data_length = ZERO_BUFFER_SIZE * 7 / 2;
	data_length -= data_length % block_size;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1, block_size);
}

static void
blockdev_writev_readv_block(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	const uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = 1 block */
	data_length = block_size;
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0, block_size);
}

static void
blockdev_comparev_and_writev(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	if (spdk_bdev_is_md_separate(bdev)) {
		/* TODO: remove this check once bdev layer properly supports
		 * compare and write for bdevs with separate md.
		 */
		SPDK_ERRLOG("skipping comparev_and_writev on bdev %s since it has\n"
			    "separate metadata which is not supported yet.\n",
			    spdk_bdev_get_name(bdev));
		return;
	}

	/* Data size = acwu size */
	data_length = spdk_bdev_get_block_size(bdev) * spdk_bdev_get_acwu(bdev);
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;

	blockdev_compare_and_write(data_length, iov_len, offset);
}

static void
blockdev_writev_readv_30x1block(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = 30 * block size */
	data_length = block_size * 30;
	iov_len = block_size;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0, block_size);
}

static void
blockdev_write_read_8blocks(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	const uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = 8 * block size */
	data_length = block_size * 8;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = data_length;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
}

static void
blockdev_writev_readv_8blocks(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	const uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = 8 * block size */
	data_length = block_size * 8;
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = data_length;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0, block_size);
}

static void
blockdev_write_read_size_gt_128k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 128K + 1 block */
	data_length = 128 * 1024;
	data_length -= data_length % block_size;
	data_length += block_size;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
}

static void
blockdev_writev_readv_size_gt_128k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 128K + 1 block */
	data_length = 128 * 1024;
	data_length -= data_length % block_size;
	data_length += block_size;
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0, block_size);
}

static void
blockdev_writev_readv_size_gt_128k_two_iov(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 128K + 1 block */
	data_length = 128 * 1024;
	data_length -= data_length % block_size;
	iov_len = data_length;
	data_length += block_size;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0, block_size);
}

static void
blockdev_write_read_invalid_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size is not a multiple of the block size */
	data_length = block_size - 1;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
}

static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	data_length = block_size;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	/* The start offset has been set to a marginal value
	 * such that offset + nbytes == Total size of
	 * blockdev. */
	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
}

static void
blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Tests the overflow condition of the blockdevs. */
	data_length = block_size * 2;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	pattern = 0xA3;

	/* The start offset has been set to a valid value
	 * but offset + nbytes is greater than the Total size
	 * of the blockdev. The test should fail. */
	offset = (spdk_bdev_get_num_blocks(bdev) - 1) * block_size;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
}

static void
blockdev_write_read_max_offset(void)
{
	int data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	const uint32_t block_size = spdk_bdev_get_block_size(bdev);

	data_length = block_size;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	/* The start offset has been set to UINT64_MAX such that
	 * adding nbytes wraps around and points to an invalid address. */
	offset = UINT64_MAX;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
}

static void
blockdev_overlapped_write_read_2blocks(void)
{
	int data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	const uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = 2 blocks */
	data_length = block_size * 2;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);

	/* Overwrite the pattern 0xbb of size 2*block size on an address offset
	 * overlapping with the address written above and assert the new value in
	 * the overlapped address range */
	/* Populate 2*block size with value 0xBB */
	pattern = 0xBB;
	/* Offset = 1 block; Overlap offset addresses and write value 0xbb */
	offset = spdk_bdev_get_block_size(bdev);
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0, block_size);
}

static void
__blockdev_reset(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

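/*
 * Issue a reset and expect it to succeed exactly when the bdev reports
 * SPDK_BDEV_IO_TYPE_RESET as supported.
 */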
static void
blockdev_test_reset(void)
{
	struct bdevio_request req;
	struct io_target *target;
	bool reset_supported;

	target = g_current_io_target;
	req.target = target;

	reset_supported = spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_RESET);
	g_completion_success = false;

	execute_spdk_function(__blockdev_reset, &req);

	CU_ASSERT_EQUAL(g_completion_success, reset_supported);
}

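/*
 * Context for an NVMe passthrough command: the raw command, its data buffer
 * and length, and the NVMe completion status (cdw0/sct/sc) filled in by
 * nvme_pt_test_complete().
 */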
1057a529ff94SJim Harris struct bdevio_passthrough_request {
1058a529ff94SJim Harris struct spdk_nvme_cmd cmd;
1059a529ff94SJim Harris void *buf;
1060a529ff94SJim Harris uint32_t len;
1061a529ff94SJim Harris struct io_target *target;
1062a529ff94SJim Harris int sct;
1063a529ff94SJim Harris int sc;
106453b92a6cSMichael Haeuptle uint32_t cdw0;
1065a529ff94SJim Harris };
1066a529ff94SJim Harris
1067a529ff94SJim Harris static void
nvme_pt_test_complete(struct spdk_bdev_io * bdev_io,bool success,void * arg)1068a529ff94SJim Harris nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
1069a529ff94SJim Harris {
1070a529ff94SJim Harris struct bdevio_passthrough_request *pt_req = arg;
1071a529ff94SJim Harris
107253b92a6cSMichael Haeuptle spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
1073a529ff94SJim Harris spdk_bdev_free_io(bdev_io);
1074a529ff94SJim Harris wake_ut_thread();
1075a529ff94SJim Harris }
1076a529ff94SJim Harris
1077a529ff94SJim Harris static void
__blockdev_nvme_passthru(void * arg)1078f23e89c5SBen Walker __blockdev_nvme_passthru(void *arg)
1079a529ff94SJim Harris {
1080f23e89c5SBen Walker struct bdevio_passthrough_request *pt_req = arg;
1081a529ff94SJim Harris struct io_target *target = pt_req->target;
1082a529ff94SJim Harris int rc;
1083a529ff94SJim Harris
1084a529ff94SJim Harris rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
1085a529ff94SJim Harris &pt_req->cmd, pt_req->buf, pt_req->len,
1086a529ff94SJim Harris nvme_pt_test_complete, pt_req);
1087a529ff94SJim Harris if (rc) {
1088a529ff94SJim Harris wake_ut_thread();
1089a529ff94SJim Harris }
1090a529ff94SJim Harris }
1091a529ff94SJim Harris
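/* Write one block of 0xA5 at LBA 4 through NVMe WRITE passthrough, read it back
 * through NVMe READ passthrough and verify the data matches. Skipped when the
 * bdev does not support SPDK_BDEV_IO_TYPE_NVME_IO. */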
1092a529ff94SJim Harris static void
109304f7575bSTomasz Zawadzki blockdev_test_nvme_passthru_rw(void)
1094a529ff94SJim Harris {
1095a529ff94SJim Harris struct bdevio_passthrough_request pt_req;
1096a529ff94SJim Harris void *write_buf, *read_buf;
109704f7575bSTomasz Zawadzki struct io_target *target;
109804f7575bSTomasz Zawadzki
109904f7575bSTomasz Zawadzki target = g_current_io_target;
1100a529ff94SJim Harris
1101a529ff94SJim Harris if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
1102a529ff94SJim Harris return;
1103a529ff94SJim Harris }
1104a529ff94SJim Harris
1105a529ff94SJim Harris memset(&pt_req, 0, sizeof(pt_req));
1106a529ff94SJim Harris pt_req.target = target;
1107a529ff94SJim Harris pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
1108a529ff94SJim Harris pt_req.cmd.nsid = 1;
1109a529ff94SJim Harris *(uint64_t *)&pt_req.cmd.cdw10 = 4; /* cdw10/cdw11: starting LBA = 4 */
1110a529ff94SJim Harris pt_req.cmd.cdw12 = 0; /* cdw12 bits 15:0: number of blocks, zero-based, so one block */
1111a529ff94SJim Harris
1112a529ff94SJim Harris pt_req.len = spdk_bdev_get_block_size(target->bdev);
111395ab4194SDarek Stojaczyk write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1114a529ff94SJim Harris memset(write_buf, 0xA5, pt_req.len);
1115a529ff94SJim Harris pt_req.buf = write_buf;
1116a529ff94SJim Harris
1117a529ff94SJim Harris pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
1118a529ff94SJim Harris pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
1119f23e89c5SBen Walker execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
1120a529ff94SJim Harris CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1121a529ff94SJim Harris CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
1122a529ff94SJim Harris
1123a529ff94SJim Harris pt_req.cmd.opc = SPDK_NVME_OPC_READ;
112495ab4194SDarek Stojaczyk read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1125a529ff94SJim Harris pt_req.buf = read_buf;
1126a529ff94SJim Harris
1127a529ff94SJim Harris pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
1128a529ff94SJim Harris pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
1129f23e89c5SBen Walker execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
1130a529ff94SJim Harris CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1131a529ff94SJim Harris CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
1132a529ff94SJim Harris
1133a529ff94SJim Harris CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
113495ab4194SDarek Stojaczyk spdk_free(read_buf);
113595ab4194SDarek Stojaczyk spdk_free(write_buf);
1136a529ff94SJim Harris }
1137a529ff94SJim Harris
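/* Submit a reserved opcode (0x7F) and expect the command to fail with
 * GENERIC/INVALID_OPCODE status and a zeroed completion dword 0. */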
1138a529ff94SJim Harris static void
113904f7575bSTomasz Zawadzki blockdev_test_nvme_passthru_vendor_specific(void)
1140a529ff94SJim Harris {
114104f7575bSTomasz Zawadzki struct bdevio_passthrough_request pt_req;
1142a529ff94SJim Harris struct io_target *target;
1143a529ff94SJim Harris
11446a8d0094STomasz Zawadzki target = g_current_io_target;
1145460796a6SJim Harris
1146460796a6SJim Harris if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
1147460796a6SJim Harris return;
1148460796a6SJim Harris }
1149460796a6SJim Harris
1150460796a6SJim Harris memset(&pt_req, 0, sizeof(pt_req));
1151460796a6SJim Harris pt_req.target = target;
1152460796a6SJim Harris pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
1153460796a6SJim Harris pt_req.cmd.nsid = 1;
1154460796a6SJim Harris
1155460796a6SJim Harris pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
1156460796a6SJim Harris pt_req.sc = SPDK_NVME_SC_SUCCESS;
115753b92a6cSMichael Haeuptle pt_req.cdw0 = 0xbeef;
1158f23e89c5SBen Walker execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
1159460796a6SJim Harris CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
1160460796a6SJim Harris CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
116153b92a6cSMichael Haeuptle CU_ASSERT(pt_req.cdw0 == 0x0);
1162460796a6SJim Harris }
1163460796a6SJim Harris
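/* Admin-queue counterpart of __blockdev_nvme_passthru(); also runs on g_thread_io. */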
1164460796a6SJim Harris static void
1165f23e89c5SBen Walker __blockdev_nvme_admin_passthru(void *arg)
116606680258STomasz Zawadzki {
1167f23e89c5SBen Walker struct bdevio_passthrough_request *pt_req = arg;
116806680258STomasz Zawadzki struct io_target *target = pt_req->target;
116906680258STomasz Zawadzki int rc;
117006680258STomasz Zawadzki
117106680258STomasz Zawadzki rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
117206680258STomasz Zawadzki &pt_req->cmd, pt_req->buf, pt_req->len,
117306680258STomasz Zawadzki nvme_pt_test_complete, pt_req);
117406680258STomasz Zawadzki if (rc) {
117506680258STomasz Zawadzki wake_ut_thread();
117606680258STomasz Zawadzki }
117706680258STomasz Zawadzki }
117806680258STomasz Zawadzki
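/* Issue IDENTIFY CONTROLLER through admin passthrough and expect success.
 * Skipped when the bdev does not support SPDK_BDEV_IO_TYPE_NVME_ADMIN. */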
117906680258STomasz Zawadzki static void
118006680258STomasz Zawadzki blockdev_test_nvme_admin_passthru(void)
118106680258STomasz Zawadzki {
118206680258STomasz Zawadzki struct io_target *target;
118306680258STomasz Zawadzki struct bdevio_passthrough_request pt_req;
118406680258STomasz Zawadzki
118506680258STomasz Zawadzki target = g_current_io_target;
118606680258STomasz Zawadzki
118706680258STomasz Zawadzki if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
118806680258STomasz Zawadzki return;
118906680258STomasz Zawadzki }
119006680258STomasz Zawadzki
119106680258STomasz Zawadzki memset(&pt_req, 0, sizeof(pt_req));
119206680258STomasz Zawadzki pt_req.target = target;
119306680258STomasz Zawadzki pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
119406680258STomasz Zawadzki pt_req.cmd.nsid = 0; /* Identify Controller is not namespace specific */
119506680258STomasz Zawadzki *(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR; /* CNS: controller data structure */
119606680258STomasz Zawadzki
119706680258STomasz Zawadzki pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
119895ab4194SDarek Stojaczyk pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
119906680258STomasz Zawadzki
120006680258STomasz Zawadzki pt_req.sct = SPDK_NVME_SCT_GENERIC;
120106680258STomasz Zawadzki pt_req.sc = SPDK_NVME_SC_SUCCESS;
1202f23e89c5SBen Walker execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
120306680258STomasz Zawadzki CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
120406680258STomasz Zawadzki CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
120506680258STomasz Zawadzki }
120606680258STomasz Zawadzki
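/* Write one block of 0xAA at offset 0, copy it to the next block with a COPY
 * command, then read the destination back and verify it matches the source.
 * Skipped when the bdev does not support SPDK_BDEV_IO_TYPE_COPY. */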
120706680258STomasz Zawadzki static void
120857b0fcc4SEvgeniy Kochetov blockdev_test_copy(void)
120957b0fcc4SEvgeniy Kochetov {
121057b0fcc4SEvgeniy Kochetov uint32_t data_length;
121157b0fcc4SEvgeniy Kochetov uint64_t src_offset, dst_offset;
121257b0fcc4SEvgeniy Kochetov struct io_target *target = g_current_io_target;
121357b0fcc4SEvgeniy Kochetov struct spdk_bdev *bdev = target->bdev;
121457b0fcc4SEvgeniy Kochetov char *tx_buf = NULL;
121557b0fcc4SEvgeniy Kochetov char *rx_buf = NULL;
121657b0fcc4SEvgeniy Kochetov int rc;
121797455abeSDenis Barakhtanov const uint32_t block_size = spdk_bdev_get_block_size(bdev);
121857b0fcc4SEvgeniy Kochetov
121957b0fcc4SEvgeniy Kochetov if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_COPY)) {
122057b0fcc4SEvgeniy Kochetov return;
122157b0fcc4SEvgeniy Kochetov }
122257b0fcc4SEvgeniy Kochetov
122397455abeSDenis Barakhtanov data_length = block_size;
122457b0fcc4SEvgeniy Kochetov CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
122557b0fcc4SEvgeniy Kochetov src_offset = 0;
122697455abeSDenis Barakhtanov dst_offset = block_size;
122757b0fcc4SEvgeniy Kochetov
122897455abeSDenis Barakhtanov initialize_buffer(&tx_buf, 0xAA, data_length, block_size);
122997455abeSDenis Barakhtanov initialize_buffer(&rx_buf, 0, data_length, block_size);
123057b0fcc4SEvgeniy Kochetov
123157b0fcc4SEvgeniy Kochetov blockdev_write(target, tx_buf, src_offset, data_length, data_length);
123257b0fcc4SEvgeniy Kochetov CU_ASSERT_EQUAL(g_completion_success, true);
123357b0fcc4SEvgeniy Kochetov
123457b0fcc4SEvgeniy Kochetov blockdev_copy(target, dst_offset, src_offset, data_length);
123557b0fcc4SEvgeniy Kochetov CU_ASSERT_EQUAL(g_completion_success, true);
123657b0fcc4SEvgeniy Kochetov
123757b0fcc4SEvgeniy Kochetov blockdev_read(target, rx_buf, dst_offset, data_length, data_length);
123857b0fcc4SEvgeniy Kochetov CU_ASSERT_EQUAL(g_completion_success, true);
123957b0fcc4SEvgeniy Kochetov
124057b0fcc4SEvgeniy Kochetov rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
124157b0fcc4SEvgeniy Kochetov CU_ASSERT_EQUAL(rc, 0);
124257b0fcc4SEvgeniy Kochetov }
124357b0fcc4SEvgeniy Kochetov
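/* Runs on the init (app) thread: tear down all targets, then either answer the
 * pending perform_tests RPC and keep the app alive (when started with -w and not
 * shutting down), or stop the io thread and exit the app with the number of
 * failures as the exit code. */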
124457b0fcc4SEvgeniy Kochetov static void
1245f23e89c5SBen Walker __stop_init_thread(void *arg)
1246f9d5ed28SSeth Howell {
1247f23e89c5SBen Walker unsigned num_failures = g_num_failures;
1248f23e89c5SBen Walker struct spdk_jsonrpc_request *request = arg;
1249f23e89c5SBen Walker
1250f23e89c5SBen Walker g_num_failures = 0;
1251f9d5ed28SSeth Howell
1252f9d5ed28SSeth Howell bdevio_cleanup_targets();
125305752408SGangCao if (g_wait_for_tests && !g_shutdown) {
12547140c2f7STomasz Zawadzki /* Do not stop the app yet, wait for another RPC */
12557140c2f7STomasz Zawadzki rpc_perform_tests_cb(num_failures, request);
12567140c2f7STomasz Zawadzki return;
12577140c2f7STomasz Zawadzki }
1258be19fe4bSJim Harris assert(spdk_get_thread() == g_thread_init);
12591b1967bdSJim Harris assert(spdk_thread_is_app_thread(NULL));
1260be19fe4bSJim Harris execute_spdk_function(__exit_io_thread, NULL);
1261f9d5ed28SSeth Howell spdk_app_stop(num_failures);
1262f9d5ed28SSeth Howell }
1263f9d5ed28SSeth Howell
1264f9d5ed28SSeth Howell static void
12657140c2f7STomasz Zawadzki stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
1266f9d5ed28SSeth Howell {
1267f23e89c5SBen Walker g_num_failures = num_failures;
1268f9d5ed28SSeth Howell
1269f23e89c5SBen Walker spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
1270f9d5ed28SSeth Howell }
1271f9d5ed28SSeth Howell
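/* CUnit suite init/fini hooks: suites are registered in the same order as the
 * g_io_targets list, so advancing g_current_io_target in suite_fini() makes each
 * suite run its tests against its own target. */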
12726a8d0094STomasz Zawadzki static int
12736a8d0094STomasz Zawadzki suite_init(void)
1274f9d5ed28SSeth Howell {
12756a8d0094STomasz Zawadzki if (g_current_io_target == NULL) {
12766a8d0094STomasz Zawadzki g_current_io_target = g_io_targets;
12776a8d0094STomasz Zawadzki }
12786a8d0094STomasz Zawadzki return 0;
1279f9d5ed28SSeth Howell }
1280f9d5ed28SSeth Howell
12816a8d0094STomasz Zawadzki static int
12826a8d0094STomasz Zawadzki suite_fini(void)
12836a8d0094STomasz Zawadzki {
12846a8d0094STomasz Zawadzki g_current_io_target = g_current_io_target->next;
12856a8d0094STomasz Zawadzki return 0;
12866a8d0094STomasz Zawadzki }
12876a8d0094STomasz Zawadzki
12886a8d0094STomasz Zawadzki #define SUITE_NAME_MAX 64
12896a8d0094STomasz Zawadzki
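/* Register one CUnit suite, named after the bdev, containing every bdevio test
 * case. Returns a negative CUnit error code on failure. */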
12906a8d0094STomasz Zawadzki static int
12916a8d0094STomasz Zawadzki __setup_ut_on_single_target(struct io_target *target)
12926a8d0094STomasz Zawadzki {
12936a8d0094STomasz Zawadzki unsigned rc = 0;
12946a8d0094STomasz Zawadzki CU_pSuite suite = NULL;
12956a8d0094STomasz Zawadzki char name[SUITE_NAME_MAX];
12966a8d0094STomasz Zawadzki
12976a8d0094STomasz Zawadzki snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
12986a8d0094STomasz Zawadzki suite = CU_add_suite(name, suite_init, suite_fini);
1299f9d5ed28SSeth Howell if (suite == NULL) {
1300f9d5ed28SSeth Howell CU_cleanup_registry();
13016a8d0094STomasz Zawadzki rc = CU_get_error();
13026a8d0094STomasz Zawadzki return -rc;
1303f9d5ed28SSeth Howell }
1304f9d5ed28SSeth Howell
1305f9d5ed28SSeth Howell if (
1306a6e02246SJonas Pfefferle CU_add_test(suite, "blockdev write read block",
1307a6e02246SJonas Pfefferle blockdev_write_read_block) == NULL
1308a6e02246SJonas Pfefferle || CU_add_test(suite, "blockdev write zeroes read block",
1309a6e02246SJonas Pfefferle blockdev_write_zeroes_read_block) == NULL
131000647138SJim Harris || CU_add_test(suite, "blockdev write zeroes read no split",
131100647138SJim Harris blockdev_write_zeroes_read_no_split) == NULL
1312a6e02246SJonas Pfefferle || CU_add_test(suite, "blockdev write zeroes read split",
1313a6e02246SJonas Pfefferle blockdev_write_zeroes_read_split) == NULL
131400647138SJim Harris || CU_add_test(suite, "blockdev write zeroes read split partial",
131500647138SJim Harris blockdev_write_zeroes_read_split_partial) == NULL
13160f1ce06dSJim Harris || CU_add_test(suite, "blockdev reset",
13170f1ce06dSJim Harris blockdev_test_reset) == NULL
1318a6e02246SJonas Pfefferle || CU_add_test(suite, "blockdev write read 8 blocks",
1319a6e02246SJonas Pfefferle blockdev_write_read_8blocks) == NULL
1320f9d5ed28SSeth Howell || CU_add_test(suite, "blockdev write read size > 128k",
1321f9d5ed28SSeth Howell blockdev_write_read_size_gt_128k) == NULL
1322f9d5ed28SSeth Howell || CU_add_test(suite, "blockdev write read invalid size",
1323f9d5ed28SSeth Howell blockdev_write_read_invalid_size) == NULL
1324f9d5ed28SSeth Howell || CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
1325f9d5ed28SSeth Howell blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
1326f9d5ed28SSeth Howell || CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
1327f9d5ed28SSeth Howell blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
1328f9d5ed28SSeth Howell || CU_add_test(suite, "blockdev write read max offset",
1329f9d5ed28SSeth Howell blockdev_write_read_max_offset) == NULL
1330a6e02246SJonas Pfefferle || CU_add_test(suite, "blockdev write read 2 blocks on overlapped address offset",
1331a6e02246SJonas Pfefferle blockdev_overlapped_write_read_2blocks) == NULL
1332a6e02246SJonas Pfefferle || CU_add_test(suite, "blockdev writev readv 8 blocks",
1333a6e02246SJonas Pfefferle blockdev_writev_readv_8blocks) == NULL
1334a6e02246SJonas Pfefferle || CU_add_test(suite, "blockdev writev readv 30 x 1block",
1335a6e02246SJonas Pfefferle blockdev_writev_readv_30x1block) == NULL
1336a6e02246SJonas Pfefferle || CU_add_test(suite, "blockdev writev readv block",
1337a6e02246SJonas Pfefferle blockdev_writev_readv_block) == NULL
1338f9d5ed28SSeth Howell || CU_add_test(suite, "blockdev writev readv size > 128k",
1339f9d5ed28SSeth Howell blockdev_writev_readv_size_gt_128k) == NULL
1340f9d5ed28SSeth Howell || CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
1341f9d5ed28SSeth Howell blockdev_writev_readv_size_gt_128k_two_iov) == NULL
1342a6e02246SJonas Pfefferle || CU_add_test(suite, "blockdev comparev and writev",
1343a6e02246SJonas Pfefferle blockdev_comparev_and_writev) == NULL
1344a529ff94SJim Harris || CU_add_test(suite, "blockdev nvme passthru rw",
1345a529ff94SJim Harris blockdev_test_nvme_passthru_rw) == NULL
1346460796a6SJim Harris || CU_add_test(suite, "blockdev nvme passthru vendor specific",
1347460796a6SJim Harris blockdev_test_nvme_passthru_vendor_specific) == NULL
134806680258STomasz Zawadzki || CU_add_test(suite, "blockdev nvme admin passthru",
134906680258STomasz Zawadzki blockdev_test_nvme_admin_passthru) == NULL
135057b0fcc4SEvgeniy Kochetov || CU_add_test(suite, "blockdev copy",
135157b0fcc4SEvgeniy Kochetov blockdev_test_copy) == NULL
1352f9d5ed28SSeth Howell ) {
1353f9d5ed28SSeth Howell CU_cleanup_registry();
13546a8d0094STomasz Zawadzki rc = CU_get_error();
13556a8d0094STomasz Zawadzki return -rc;
13566a8d0094STomasz Zawadzki }
13576a8d0094STomasz Zawadzki return 0;
1358f9d5ed28SSeth Howell }
1359f9d5ed28SSeth Howell
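/* Runs on g_thread_ut: build the CUnit registry with one suite per constructed
 * target, run all tests, then hand the failure count (or a CUnit error) back to
 * the init thread and exit this thread. */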
13606a8d0094STomasz Zawadzki static void
1361f23e89c5SBen Walker __run_ut_thread(void *arg)
13626a8d0094STomasz Zawadzki {
1363f23e89c5SBen Walker struct spdk_jsonrpc_request *request = arg;
13646a8d0094STomasz Zawadzki int rc = 0;
13656a8d0094STomasz Zawadzki struct io_target *target;
13666a8d0094STomasz Zawadzki
13676a8d0094STomasz Zawadzki if (CU_initialize_registry() != CUE_SUCCESS) {
13686a8d0094STomasz Zawadzki /* CUnit error, probably won't recover */
13696a8d0094STomasz Zawadzki rc = CU_get_error();
1370be19fe4bSJim Harris rc = -rc;
1371be19fe4bSJim Harris goto ret;
13726a8d0094STomasz Zawadzki }
13736a8d0094STomasz Zawadzki
13746a8d0094STomasz Zawadzki target = g_io_targets;
13756a8d0094STomasz Zawadzki while (target != NULL) {
13766a8d0094STomasz Zawadzki rc = __setup_ut_on_single_target(target);
13776a8d0094STomasz Zawadzki if (rc < 0) {
13786a8d0094STomasz Zawadzki /* CUnit error, probably won't recover */
1379be19fe4bSJim Harris rc = -rc;
1380be19fe4bSJim Harris goto ret;
13816a8d0094STomasz Zawadzki }
13826a8d0094STomasz Zawadzki target = target->next;
13836a8d0094STomasz Zawadzki }
1384f9d5ed28SSeth Howell CU_basic_set_mode(CU_BRM_VERBOSE);
1385f9d5ed28SSeth Howell CU_basic_run_tests();
1386be19fe4bSJim Harris rc = CU_get_number_of_failures();
1387f9d5ed28SSeth Howell CU_cleanup_registry();
13886a8d0094STomasz Zawadzki
1389be19fe4bSJim Harris ret:
1390be19fe4bSJim Harris stop_init_thread(rc, request);
1391be19fe4bSJim Harris assert(spdk_get_thread() == g_thread_ut);
1392be19fe4bSJim Harris spdk_thread_exit(g_thread_ut);
1393f9d5ed28SSeth Howell }
1394f9d5ed28SSeth Howell
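/* Runs on the init thread: open every bdev as a test target, then start the test
 * run on the UT thread (no RPC request is associated, hence the NULL argument). */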
1395f9d5ed28SSeth Howell static void
1396f23e89c5SBen Walker __construct_targets(void *arg)
1397f23e89c5SBen Walker {
1398f23e89c5SBen Walker if (bdevio_construct_targets() < 0) {
1399f23e89c5SBen Walker spdk_app_stop(-1);
1400f23e89c5SBen Walker return;
1401f23e89c5SBen Walker }
1402f23e89c5SBen Walker
1403f23e89c5SBen Walker spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
1404f23e89c5SBen Walker }
1405f23e89c5SBen Walker
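/* spdk_app_start() entry point: keep the app thread as g_thread_init and create
 * the UT and io threads pinned to two other cores. With -w, wait for the
 * perform_tests RPC instead of constructing targets right away. */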
1406f23e89c5SBen Walker static void
1407deb8ee5cSBen Walker test_main(void *arg1)
1408f9d5ed28SSeth Howell {
1409276c31cdSChangpeng Liu struct spdk_cpuset tmpmask = {};
1410d108dcf5STomasz Zawadzki uint32_t i;
1411f9d5ed28SSeth Howell
1412f9d5ed28SSeth Howell pthread_mutex_init(&g_test_mutex, NULL);
1413f9d5ed28SSeth Howell pthread_cond_init(&g_test_cond, NULL);
1414f9d5ed28SSeth Howell
1415d108dcf5STomasz Zawadzki /* This test requires at least three cores.
1416d108dcf5STomasz Zawadzki * g_thread_init is the app thread on the main core, created by the event framework.
1417d108dcf5STomasz Zawadzki * The next two threads are used only by the tests and should always run on separate CPU cores. */
1418d108dcf5STomasz Zawadzki if (spdk_env_get_core_count() < 3) {
1419f23e89c5SBen Walker spdk_app_stop(-1);
1420f23e89c5SBen Walker return;
1421f23e89c5SBen Walker }
1422f23e89c5SBen Walker
1423d108dcf5STomasz Zawadzki SPDK_ENV_FOREACH_CORE(i) {
1424d108dcf5STomasz Zawadzki if (i == spdk_env_get_current_core()) {
1425f23e89c5SBen Walker g_thread_init = spdk_get_thread();
1426d108dcf5STomasz Zawadzki continue;
1427d108dcf5STomasz Zawadzki }
14280bc0463eSShuhei Matsumoto spdk_cpuset_zero(&tmpmask);
1429d108dcf5STomasz Zawadzki spdk_cpuset_set_cpu(&tmpmask, i, true);
1430d108dcf5STomasz Zawadzki if (g_thread_ut == NULL) {
14310bc0463eSShuhei Matsumoto g_thread_ut = spdk_thread_create("ut_thread", &tmpmask);
1432d108dcf5STomasz Zawadzki } else if (g_thread_io == NULL) {
14330bc0463eSShuhei Matsumoto g_thread_io = spdk_thread_create("io_thread", &tmpmask);
1434f23e89c5SBen Walker }
1435f23e89c5SBen Walker
1436f23e89c5SBen Walker }
1437f23e89c5SBen Walker
14387140c2f7STomasz Zawadzki if (g_wait_for_tests) {
14397140c2f7STomasz Zawadzki /* Do not perform any tests until the perform_tests RPC is received */
14407140c2f7STomasz Zawadzki return;
14417140c2f7STomasz Zawadzki }
14427140c2f7STomasz Zawadzki
1443f23e89c5SBen Walker spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
1444f9d5ed28SSeth Howell }
1445f9d5ed28SSeth Howell
1446f727997fSDariusz Stojaczyk static void
1447f727997fSDariusz Stojaczyk bdevio_usage(void)
1448f727997fSDariusz Stojaczyk {
14497140c2f7STomasz Zawadzki printf(" -w start bdevio app and wait for the perform_tests RPC to start the tests\n");
1450f727997fSDariusz Stojaczyk }
1451f727997fSDariusz Stojaczyk
145201e5610dSChunyang Hui static int
1453f727997fSDariusz Stojaczyk bdevio_parse_arg(int ch, char *arg)
1454f727997fSDariusz Stojaczyk {
14557140c2f7STomasz Zawadzki switch (ch) {
14567140c2f7STomasz Zawadzki case 'w':
14577140c2f7STomasz Zawadzki g_wait_for_tests = true;
14587140c2f7STomasz Zawadzki break;
14597140c2f7STomasz Zawadzki default:
14607140c2f7STomasz Zawadzki return -EINVAL;
14617140c2f7STomasz Zawadzki }
146201e5610dSChunyang Hui return 0;
1463f727997fSDariusz Stojaczyk }
1464f727997fSDariusz Stojaczyk
14657140c2f7STomasz Zawadzki struct rpc_perform_tests {
14667140c2f7STomasz Zawadzki char *name;
14677140c2f7STomasz Zawadzki };
14687140c2f7STomasz Zawadzki
14697140c2f7STomasz Zawadzki static void
14707140c2f7STomasz Zawadzki free_rpc_perform_tests(struct rpc_perform_tests *r)
14717140c2f7STomasz Zawadzki {
14727140c2f7STomasz Zawadzki free(r->name);
14737140c2f7STomasz Zawadzki }
14747140c2f7STomasz Zawadzki
14757140c2f7STomasz Zawadzki static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
14767140c2f7STomasz Zawadzki {"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
14777140c2f7STomasz Zawadzki };
14787140c2f7STomasz Zawadzki
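/* Complete the perform_tests RPC: return the failure count (0) as the result on
 * success, otherwise a JSON-RPC error stating how many test cases failed. */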
14797140c2f7STomasz Zawadzki static void
14807140c2f7STomasz Zawadzki rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
14817140c2f7STomasz Zawadzki {
14827140c2f7STomasz Zawadzki struct spdk_json_write_ctx *w;
14837140c2f7STomasz Zawadzki
1484f87568a0STomasz Zawadzki if (num_failures == 0) {
14857140c2f7STomasz Zawadzki w = spdk_jsonrpc_begin_result(request);
14867140c2f7STomasz Zawadzki spdk_json_write_uint32(w, num_failures);
14877140c2f7STomasz Zawadzki spdk_jsonrpc_end_result(request, w);
1488f87568a0STomasz Zawadzki } else {
1489f87568a0STomasz Zawadzki spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
1490f87568a0STomasz Zawadzki "%d test cases failed", num_failures);
1491f87568a0STomasz Zawadzki }
14927140c2f7STomasz Zawadzki }
14937140c2f7STomasz Zawadzki
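/* Handler for the 'perform_tests' RPC: construct a target for the optional
 * "name" parameter (or for every bdev when no name is given) and start the
 * CUnit run on the UT thread. A client request might look like this, with
 * "Malloc0" standing in for any registered bdev name:
 *   { "jsonrpc": "2.0", "id": 1, "method": "perform_tests",
 *     "params": { "name": "Malloc0" } }
 */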
14947140c2f7STomasz Zawadzki static void
14957140c2f7STomasz Zawadzki rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
14967140c2f7STomasz Zawadzki {
14977140c2f7STomasz Zawadzki struct rpc_perform_tests req = {NULL};
14987140c2f7STomasz Zawadzki int rc;
14997140c2f7STomasz Zawadzki
15007140c2f7STomasz Zawadzki if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
15017140c2f7STomasz Zawadzki SPDK_COUNTOF(rpc_perform_tests_decoders),
15027140c2f7STomasz Zawadzki &req)) {
15037140c2f7STomasz Zawadzki SPDK_ERRLOG("spdk_json_decode_object failed\n");
15047140c2f7STomasz Zawadzki spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
15057140c2f7STomasz Zawadzki goto invalid;
15067140c2f7STomasz Zawadzki }
15077140c2f7STomasz Zawadzki
15087140c2f7STomasz Zawadzki if (req.name) {
1509f91ba919SShuhei Matsumoto rc = bdevio_construct_target_by_name(req.name);
15107140c2f7STomasz Zawadzki if (rc < 0) {
1511f91ba919SShuhei Matsumoto SPDK_ERRLOG("Could not construct target for bdev '%s'\n", req.name);
15127140c2f7STomasz Zawadzki spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
15137140c2f7STomasz Zawadzki "Could not construct target for bdev '%s': %s",
1514f91ba919SShuhei Matsumoto req.name, spdk_strerror(-rc));
15157140c2f7STomasz Zawadzki goto invalid;
15167140c2f7STomasz Zawadzki }
15177140c2f7STomasz Zawadzki } else {
15187140c2f7STomasz Zawadzki rc = bdevio_construct_targets();
15197140c2f7STomasz Zawadzki if (rc < 0) {
15207140c2f7STomasz Zawadzki SPDK_ERRLOG("Could not construct targets for all bdevs\n");
15217140c2f7STomasz Zawadzki spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
15227140c2f7STomasz Zawadzki "Could not construct targets for all bdevs: %s",
15237140c2f7STomasz Zawadzki spdk_strerror(-rc));
15247140c2f7STomasz Zawadzki goto invalid;
15257140c2f7STomasz Zawadzki }
15267140c2f7STomasz Zawadzki }
15277140c2f7STomasz Zawadzki free_rpc_perform_tests(&req);
15287140c2f7STomasz Zawadzki
1529f23e89c5SBen Walker spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);
15307140c2f7STomasz Zawadzki
15317140c2f7STomasz Zawadzki return;
15327140c2f7STomasz Zawadzki
15337140c2f7STomasz Zawadzki invalid:
15347140c2f7STomasz Zawadzki free_rpc_perform_tests(&req);
15357140c2f7STomasz Zawadzki }
15367140c2f7STomasz Zawadzki SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)
15377140c2f7STomasz Zawadzki
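/* App shutdown callback (e.g. on SIGINT/SIGTERM): mark shutdown so that
 * __stop_init_thread() stops the app instead of waiting for another RPC. */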
153805752408SGangCao static void
153905752408SGangCao spdk_bdevio_shutdown_cb(void)
154005752408SGangCao {
154105752408SGangCao g_shutdown = true;
154205752408SGangCao spdk_thread_send_msg(g_thread_init, __stop_init_thread, NULL);
154305752408SGangCao }
154405752408SGangCao
1545f9d5ed28SSeth Howell int
1546f9d5ed28SSeth Howell main(int argc, char **argv)
1547f9d5ed28SSeth Howell {
1548218fd85dSDariusz Stojaczyk int rc;
1549f9d5ed28SSeth Howell struct spdk_app_opts opts = {};
1550f9d5ed28SSeth Howell
155148701bd9SZiye Yang spdk_app_opts_init(&opts, sizeof(opts));
1552af46393eSJim Harris opts.name = "bdevio";
1553f727997fSDariusz Stojaczyk opts.reactor_mask = "0x7"; /* three cores: init/app, UT and io threads */
155405752408SGangCao opts.shutdown_cb = spdk_bdevio_shutdown_cb;
1555*2f3ac73dSKrzysztof Karas opts.rpc_addr = NULL; /* RPC server stays off unless enabled below or on the command line */
1556f727997fSDariusz Stojaczyk
15577140c2f7STomasz Zawadzki if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
1558218fd85dSDariusz Stojaczyk bdevio_parse_arg, bdevio_usage)) !=
1559218fd85dSDariusz Stojaczyk SPDK_APP_PARSE_ARGS_SUCCESS) {
1560218fd85dSDariusz Stojaczyk return rc;
1561218fd85dSDariusz Stojaczyk }
1562f9d5ed28SSeth Howell
1563*2f3ac73dSKrzysztof Karas if (g_wait_for_tests && opts.rpc_addr == NULL) {
1564*2f3ac73dSKrzysztof Karas opts.rpc_addr = SPDK_DEFAULT_RPC_ADDR;
1565*2f3ac73dSKrzysztof Karas }
1566*2f3ac73dSKrzysztof Karas
156736287957SBen Walker rc = spdk_app_start(&opts, test_main, NULL);
1568f9d5ed28SSeth Howell spdk_app_fini();
1569f9d5ed28SSeth Howell
1570218fd85dSDariusz Stojaczyk return rc;
1571f9d5ed28SSeth Howell }
1572