xref: /spdk/test/app/fuzz/vhost_fuzz/vhost_fuzz.c (revision b30d57cdad6d2bc75cc1e4e2ebbcebcb0d98dcfa)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation. All rights reserved.
5  *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 #include "spdk/env.h"
36 #include "spdk/json.h"
37 #include "spdk/event.h"
38 #include "spdk/likely.h"
39 #include "spdk/util.h"
40 #include "spdk/string.h"
41 #include "spdk_internal/virtio.h"
42 #include "spdk_internal/vhost_user.h"
43 
44 #include "fuzz_common.h"
45 #include "vhost_fuzz.h"
46 
47 #include <linux/virtio_blk.h>
48 #include <linux/virtio_scsi.h>
49 
/* Features desired/implemented by virtio blk. */
#define VIRTIO_BLK_DEV_SUPPORTED_FEATURES		\
	(1ULL << VIRTIO_BLK_F_BLK_SIZE		|	\
	 1ULL << VIRTIO_BLK_F_TOPOLOGY		|	\
	 1ULL << VIRTIO_BLK_F_MQ		|	\
	 1ULL << VIRTIO_BLK_F_RO		|	\
	 1ULL << VIRTIO_BLK_F_DISCARD		|	\
	 1ULL << VIRTIO_RING_F_EVENT_IDX	|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

/* Features desired/implemented by virtio scsi. */
#define VIRTIO_SCSI_DEV_SUPPORTED_FEATURES		\
	(1ULL << VIRTIO_SCSI_F_INOUT		|	\
	 1ULL << VIRTIO_SCSI_F_HOTPLUG		|	\
	 1ULL << VIRTIO_RING_F_EVENT_IDX	|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

/* Queue indices: the first two queues are the fixed SCSI control/event
 * queues; the request queue follows them. */
#define VIRTIO_DEV_FIXED_QUEUES	2
#define VIRTIO_SCSI_CONTROLQ	0
#define VIRTIO_SCSI_EVENTQ	1
#define VIRTIO_REQUESTQ		2
#define FUZZ_MAX_QUEUES		3

/* Number of preallocated io contexts per fuzz device. */
#define FUZZ_QUEUE_DEPTH	128

/* JSON object names used when dumping and parsing commands. */
#define BLK_IO_NAME		"vhost_blk_cmd"
#define SCSI_IO_NAME		"vhost_scsi_cmd"
#define SCSI_MGMT_NAME		"vhost_scsi_mgmt_cmd"
78 
/* The three iovecs submitted with each request: request header, optional
 * data buffer, and response buffer. */
struct fuzz_vhost_iov_ctx {
	struct iovec			iov_req;
	struct iovec			iov_data;
	struct iovec			iov_resp;
};
84 
/* Per-command state: the iovecs plus the request/response storage that is
 * shared with the vhost target. Only one union member is active at a time,
 * selected by the device's traffic type (blk, scsi, or scsi tmf). */
struct fuzz_vhost_io_ctx {
	struct fuzz_vhost_iov_ctx		iovs;
	union {
		struct virtio_blk_outhdr	blk_req;
		struct virtio_scsi_cmd_req	scsi_req;
		struct virtio_scsi_ctrl_tmf_req	scsi_tmf_req;
	} req;
	union {
		uint8_t					blk_resp;
		struct virtio_scsi_cmd_resp		scsi_resp;
		union {
			struct virtio_scsi_ctrl_tmf_resp	scsi_tmf_resp;
			struct virtio_scsi_ctrl_an_resp		an_resp;
		} scsi_tmf_resp;
	} resp;

	/* Linkage on either the free or the outstanding list of the owning device. */
	TAILQ_ENTRY(fuzz_vhost_io_ctx) link;
};
103 
/* One fuzz device: a virtio connection to a vhost socket, the SPDK thread
 * and poller driving it, its io-context pool, and per-run statistics. */
struct fuzz_vhost_dev_ctx {
	struct virtio_dev			virtio_dev;
	struct spdk_thread			*thread;
	struct spdk_poller			*poller;

	/* Backing array for the io contexts; entries migrate between the two lists. */
	struct fuzz_vhost_io_ctx		*io_ctx_array;
	TAILQ_HEAD(, fuzz_vhost_io_ctx)		free_io_ctx;
	TAILQ_HEAD(, fuzz_vhost_io_ctx)		outstanding_io_ctx;

	/* Seed for rand_r-style generation of request contents. */
	unsigned int				random_seed;

	uint64_t				submitted_io;
	uint64_t				completed_io;
	uint64_t				successful_io;
	/* Deadline (in ticks) after which outstanding I/O is considered hung. */
	uint64_t				timeout_tsc;

	/* Traffic-shaping flags set once at device creation. */
	bool					socket_is_blk;
	bool					test_scsi_tmf;
	bool					valid_lun;
	bool					use_bogus_buffer;
	bool					use_valid_buffer;
	bool					timed_out;

	TAILQ_ENTRY(fuzz_vhost_dev_ctx)	link;
};
129 
/* Global run state */
uint64_t				g_runtime_ticks;
int					g_runtime;
int					g_num_active_threads;
/* Cleared by any device poller to stop all devices. */
bool					g_run = true;
bool					g_verbose_mode = false;

/* Global resources */
TAILQ_HEAD(, fuzz_vhost_dev_ctx)	g_dev_list = TAILQ_HEAD_INITIALIZER(g_dev_list);
struct spdk_poller			*g_run_poller;
/* Shared buffer used as the data iov when valid buffers are requested. */
void					*g_valid_buffer;
unsigned int				g_random_seed;


/* Global parameters and resources for parsed commands */
bool					g_keep_iov_pointers = false;
char					*g_json_file = NULL;
struct fuzz_vhost_io_ctx		*g_blk_cmd_array = NULL;
struct fuzz_vhost_io_ctx		*g_scsi_cmd_array = NULL;
struct fuzz_vhost_io_ctx		*g_scsi_mgmt_cmd_array = NULL;

size_t					g_blk_cmd_array_size;
size_t					g_scsi_cmd_array_size;
size_t					g_scsi_mgmt_cmd_array_size;
154 
155 static void
156 cleanup(void)
157 {
158 	struct fuzz_vhost_dev_ctx *dev_ctx, *tmp;
159 	printf("Fuzzing completed.\n");
160 	TAILQ_FOREACH_SAFE(dev_ctx, &g_dev_list, link, tmp) {
161 		printf("device %p stats: Completed I/O: %lu, Successful I/O: %lu\n", dev_ctx,
162 		       dev_ctx->completed_io, dev_ctx->successful_io);
163 		virtio_dev_release_queue(&dev_ctx->virtio_dev, VIRTIO_REQUESTQ);
164 		if (!dev_ctx->socket_is_blk) {
165 			virtio_dev_release_queue(&dev_ctx->virtio_dev, VIRTIO_SCSI_EVENTQ);
166 			virtio_dev_release_queue(&dev_ctx->virtio_dev, VIRTIO_SCSI_CONTROLQ);
167 		}
168 		virtio_dev_stop(&dev_ctx->virtio_dev);
169 		virtio_dev_destruct(&dev_ctx->virtio_dev);
170 		if (dev_ctx->io_ctx_array) {
171 			spdk_free(dev_ctx->io_ctx_array);
172 		}
173 		free(dev_ctx);
174 	}
175 
176 	spdk_free(g_valid_buffer);
177 
178 	if (g_blk_cmd_array) {
179 		free(g_blk_cmd_array);
180 	}
181 	if (g_scsi_cmd_array) {
182 		free(g_scsi_cmd_array);
183 	}
184 	if (g_scsi_mgmt_cmd_array) {
185 		free(g_scsi_mgmt_cmd_array);
186 	}
187 }
188 
189 /* Get a memory address that is random and not located in our hugepage memory. */
190 static void *
191 get_invalid_mem_address(uint64_t length)
192 {
193 	uint64_t chosen_address = 0x0;
194 
195 	while (true) {
196 		chosen_address = rand();
197 		chosen_address = (chosen_address << 32) | rand();
198 		if (spdk_vtophys((void *)chosen_address, &length) == SPDK_VTOPHYS_ERROR) {
199 			return (void *)chosen_address;
200 		}
201 	}
202 	return NULL;
203 }
204 
205 /* dev initialization code begin. */
206 static int
207 virtio_dev_init(struct virtio_dev *vdev, const char *socket_path, uint64_t flags,
208 		uint16_t max_queues)
209 {
210 	int rc;
211 
212 	rc = virtio_user_dev_init(vdev, "dev_ctx", socket_path, 1024);
213 	if (rc != 0) {
214 		fprintf(stderr, "Failed to initialize virtual bdev\n");
215 		return rc;
216 	}
217 
218 	rc = virtio_dev_reset(vdev, flags);
219 	if (rc != 0) {
220 		return rc;
221 	}
222 
223 	rc = virtio_dev_start(vdev, max_queues, VIRTIO_DEV_FIXED_QUEUES);
224 	if (rc != 0) {
225 		return rc;
226 	}
227 
228 	rc = virtio_dev_acquire_queue(vdev, VIRTIO_REQUESTQ);
229 	if (rc < 0) {
230 		fprintf(stderr, "Couldn't get an unused queue for the io_channel.\n");
231 		virtio_dev_stop(vdev);
232 		return rc;
233 	}
234 	return 0;
235 }
236 
/*
 * Initialize a virtio-blk fuzz device: validate and clamp the requested
 * queue count against what the target advertises, then do common init.
 */
static int
blk_dev_init(struct virtio_dev *vdev, const char *socket_path, uint16_t max_queues)
{
	uint16_t host_max_queues;
	int rc;

	/* If the target supports multiqueue, read the real queue count from
	 * its config space; otherwise assume a single request queue. */
	if (virtio_dev_has_feature(vdev, VIRTIO_BLK_F_MQ)) {
		rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_blk_config, num_queues),
						&host_max_queues, sizeof(host_max_queues));
		if (rc) {
			fprintf(stderr, "%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
			return rc;
		}
	} else {
		host_max_queues = 1;
	}

	if (max_queues == 0) {
		fprintf(stderr, "%s: requested 0 request queues (%"PRIu16" available).\n",
			vdev->name, host_max_queues);
		return -EINVAL;
	}

	/* Clamp (rather than fail) when asking for more queues than available. */
	if (max_queues > host_max_queues) {
		fprintf(stderr, "%s: requested %"PRIu16" request queues "
			"but only %"PRIu16" available.\n",
			vdev->name, max_queues, host_max_queues);
		max_queues = host_max_queues;
	}

	return virtio_dev_init(vdev, socket_path, VIRTIO_BLK_DEV_SUPPORTED_FEATURES, max_queues);
}
269 
270 static int
271 scsi_dev_init(struct virtio_dev *vdev, const char *socket_path, uint16_t max_queues)
272 {
273 	int rc;
274 
275 	rc = virtio_dev_init(vdev, socket_path, VIRTIO_SCSI_DEV_SUPPORTED_FEATURES, max_queues);
276 	if (rc != 0) {
277 		return rc;
278 	}
279 
280 	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_CONTROLQ);
281 	if (rc != 0) {
282 		SPDK_ERRLOG("Failed to acquire the controlq.\n");
283 		return rc;
284 	}
285 
286 	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_EVENTQ);
287 	if (rc != 0) {
288 		SPDK_ERRLOG("Failed to acquire the eventq.\n");
289 		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
290 		return rc;
291 	}
292 
293 	return 0;
294 }
295 
/*
 * Create and register one fuzz device for the vhost socket at socket_path.
 * Allocates the io-context pool, spawns a dedicated SPDK thread, and brings
 * up the virtio connection. Returns 0 on success or a negative errno.
 */
int
fuzz_vhost_dev_init(const char *socket_path, bool is_blk_dev, bool use_bogus_buffer,
		    bool use_valid_buffer, bool valid_lun, bool test_scsi_tmf)
{
	struct fuzz_vhost_dev_ctx *dev_ctx;
	int rc = 0, i;

	dev_ctx = calloc(1, sizeof(*dev_ctx));
	if (dev_ctx == NULL) {
		return -ENOMEM;
	}

	dev_ctx->socket_is_blk = is_blk_dev;
	dev_ctx->use_bogus_buffer = use_bogus_buffer;
	dev_ctx->use_valid_buffer = use_valid_buffer;
	dev_ctx->valid_lun = valid_lun;
	dev_ctx->test_scsi_tmf = test_scsi_tmf;

	TAILQ_INIT(&dev_ctx->free_io_ctx);
	TAILQ_INIT(&dev_ctx->outstanding_io_ctx);

	/* The io contexts live in shared memory because the vhost target
	 * dereferences the request/response buffers embedded in them. */
	assert(sizeof(*dev_ctx->io_ctx_array) <= UINT64_MAX / FUZZ_QUEUE_DEPTH);
	dev_ctx->io_ctx_array = spdk_malloc(sizeof(*dev_ctx->io_ctx_array) * FUZZ_QUEUE_DEPTH, 0x0, NULL,
					    SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_SHARE);
	if (dev_ctx->io_ctx_array == NULL) {
		free(dev_ctx);
		return -ENOMEM;
	}

	/* Every io context starts out on the free list. */
	for (i = 0; i < FUZZ_QUEUE_DEPTH; i++) {
		TAILQ_INSERT_HEAD(&dev_ctx->free_io_ctx, &dev_ctx->io_ctx_array[i], link);
	}

	dev_ctx->thread = spdk_thread_create(NULL, NULL);
	if (dev_ctx->thread == NULL) {
		fprintf(stderr, "Unable to allocate a thread for a fuzz device.\n");
		rc = -ENOMEM;
		goto error_out;
	}

	if (is_blk_dev) {
		rc = blk_dev_init(&dev_ctx->virtio_dev, socket_path, FUZZ_MAX_QUEUES);
	} else {
		rc = scsi_dev_init(&dev_ctx->virtio_dev, socket_path, FUZZ_MAX_QUEUES);
	}

	if (rc) {
		fprintf(stderr, "Unable to prepare the device to perform I/O.\n");
		goto error_out;
	}

	TAILQ_INSERT_TAIL(&g_dev_list, dev_ctx, link);
	return 0;

error_out:
	/* NOTE(review): if the device init fails after spdk_thread_create()
	 * succeeded, the created thread is never exited/destroyed here —
	 * looks like a leak on the error path; confirm intended teardown. */
	spdk_free(dev_ctx->io_ctx_array);
	free(dev_ctx);
	return rc;
}
355 /* dev initialization code end */
356 
357 /* data dumping functions begin */
/*
 * JSON write callback: dump the rendered JSON chunk to stderr.
 * The buffer handed to a write callback is sized by 'size' and is not
 * guaranteed to be NUL-terminated, so bound the print with %.*s.
 * Always returns 0 (success) as required by the write-callback contract.
 */
static int
dump_virtio_cmd(void *ctx, const void *data, size_t size)
{
	fprintf(stderr, "%.*s\n", (int)size, (const char *)data);
	return 0;
}
364 
/* Write the virtio-blk request header fields as named JSON values. */
static void
print_blk_io_data(struct spdk_json_write_ctx *w, struct fuzz_vhost_io_ctx *io_ctx)
{
	spdk_json_write_named_uint32(w, "type", io_ctx->req.blk_req.type);
	spdk_json_write_named_uint32(w, "ioprio", io_ctx->req.blk_req.ioprio);
	spdk_json_write_named_uint64(w, "sector", io_ctx->req.blk_req.sector);
}
372 
/* Write the SCSI task-management request fields as JSON. The LUN bytes are
 * base64 encoded so the dump can round-trip through the parser. */
static void
print_scsi_tmf_io_data(struct spdk_json_write_ctx *w, struct fuzz_vhost_io_ctx *io_ctx)
{
	char *lun_data;

	lun_data = fuzz_get_value_base_64_buffer(io_ctx->req.scsi_tmf_req.lun,
			sizeof(io_ctx->req.scsi_tmf_req.lun));

	spdk_json_write_named_uint32(w, "type", io_ctx->req.scsi_tmf_req.type);
	spdk_json_write_named_uint32(w, "subtype", io_ctx->req.scsi_tmf_req.subtype);
	spdk_json_write_named_string(w, "lun", lun_data);
	spdk_json_write_named_uint64(w, "tag", io_ctx->req.scsi_tmf_req.tag);

	free(lun_data);
}
388 
/* Write the SCSI command request fields as JSON. LUN and CDB are binary
 * blobs, so they are base64 encoded for the dump. */
static void
print_scsi_io_data(struct spdk_json_write_ctx *w, struct fuzz_vhost_io_ctx *io_ctx)
{
	char *lun_data;
	char *cdb_data;

	lun_data = fuzz_get_value_base_64_buffer(io_ctx->req.scsi_req.lun,
			sizeof(io_ctx->req.scsi_req.lun));
	cdb_data = fuzz_get_value_base_64_buffer(io_ctx->req.scsi_req.cdb,
			sizeof(io_ctx->req.scsi_req.cdb));

	spdk_json_write_named_string(w, "lun", lun_data);
	spdk_json_write_named_uint64(w, "tag", io_ctx->req.scsi_req.tag);
	spdk_json_write_named_uint32(w, "task_attr", io_ctx->req.scsi_req.task_attr);
	spdk_json_write_named_uint32(w, "prio", io_ctx->req.scsi_req.prio);
	spdk_json_write_named_uint32(w, "crn", io_ctx->req.scsi_req.crn);
	spdk_json_write_named_string(w, "cdb", cdb_data);

	free(lun_data);
	free(cdb_data);
}
410 
/*
 * Write one iovec as a JSON object: the base address as a hex string and
 * the length as an integer. Falls back to "0" if formatting fails.
 */
static void
print_iov_obj(struct spdk_json_write_ctx *w, const char *iov_name, struct iovec *iov)
{
	/* Up to 16 hex digits plus the null terminator. */
	char hex_addr[19];
	int rc;

	/* PRIxPTR keeps the format portable for uintptr_t (plain %lx is not),
	 * and sizeof() avoids repeating the buffer size as a magic number. */
	rc = snprintf(hex_addr, sizeof(hex_addr), "%" PRIxPTR, (uintptr_t)iov->iov_base);

	/* default to 0 on error or truncation so the JSON stays well formed. */
	if (rc < 0 || rc >= (int)sizeof(hex_addr)) {
		hex_addr[0] = '0';
		hex_addr[1] = '\0';
	}

	spdk_json_write_named_object_begin(w, iov_name);
	spdk_json_write_named_string(w, "iov_base", hex_addr);
	spdk_json_write_named_uint64(w, "iov_len", iov->iov_len);
	spdk_json_write_object_end(w);
}
431 
/* Dump all three iovecs of a command in request/data/response order. */
static void
print_iovs(struct spdk_json_write_ctx *w, struct fuzz_vhost_io_ctx *io_ctx)
{
	print_iov_obj(w, "req_iov", &io_ctx->iovs.iov_req);
	print_iov_obj(w, "data_iov", &io_ctx->iovs.iov_data);
	print_iov_obj(w, "resp_iov", &io_ctx->iovs.iov_resp);
}
439 
/*
 * Serialize one command to stderr as formatted JSON, using the object name
 * matching the device's traffic type (blk, scsi tmf, or scsi).
 */
static void
print_req_obj(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
{

	struct spdk_json_write_ctx *w;

	/* dump_virtio_cmd streams the rendered JSON to stderr. */
	w = spdk_json_write_begin(dump_virtio_cmd, NULL, SPDK_JSON_WRITE_FLAG_FORMATTED);

	if (dev_ctx->socket_is_blk) {
		spdk_json_write_named_object_begin(w, BLK_IO_NAME);
		print_iovs(w, io_ctx);
		print_blk_io_data(w, io_ctx);
	} else if (dev_ctx->test_scsi_tmf) {
		spdk_json_write_named_object_begin(w, SCSI_MGMT_NAME);
		print_iovs(w, io_ctx);
		print_scsi_tmf_io_data(w, io_ctx);
	} else {
		spdk_json_write_named_object_begin(w, SCSI_IO_NAME);
		print_iovs(w, io_ctx);
		print_scsi_io_data(w, io_ctx);
	}
	spdk_json_write_object_end(w);
	spdk_json_write_end(w);
}
464 
465 static void
466 dump_outstanding_io(struct fuzz_vhost_dev_ctx *dev_ctx)
467 {
468 	struct fuzz_vhost_io_ctx *io_ctx, *tmp;
469 
470 	TAILQ_FOREACH_SAFE(io_ctx, &dev_ctx->outstanding_io_ctx, link, tmp) {
471 		print_req_obj(dev_ctx, io_ctx);
472 		TAILQ_REMOVE(&dev_ctx->outstanding_io_ctx, io_ctx, link);
473 		TAILQ_INSERT_TAIL(&dev_ctx->free_io_ctx, io_ctx, link);
474 	}
475 }
476 /* data dumping functions end */
477 
478 /* data parsing functions begin */
/* Return the numeric value of an ASCII hex digit, or -1 for any other byte. */
static int
hex_value(uint8_t c)
{
	if (c >= '0' && c <= '9') {
		return c - '0';
	}
	if (c >= 'A' && c <= 'F') {
		return c - 'A' + 0xA;
	}
	if (c >= 'a' && c <= 'f') {
		return c - 'a' + 0xA;
	}
	return -1;
}
493 
494 static int
495 fuzz_json_decode_hex_uint64(const struct spdk_json_val *val, void *out)
496 {
497 	uint64_t *out_val = out;
498 	size_t i;
499 	char *val_pointer = val->start;
500 	int current_val;
501 
502 	if (val->len > 16) {
503 		return -EINVAL;
504 	}
505 
506 	*out_val = 0;
507 	for (i = 0; i < val->len; i++) {
508 		*out_val = *out_val << 4;
509 		current_val = hex_value(*val_pointer);
510 		if (current_val < 0) {
511 			return -EINVAL;
512 		}
513 		*out_val += current_val;
514 		val_pointer++;
515 	}
516 
517 	return 0;
518 }
519 
520 static const struct spdk_json_object_decoder fuzz_vhost_iov_decoders[] = {
521 	{"iov_base", offsetof(struct iovec, iov_base), fuzz_json_decode_hex_uint64},
522 	{"iov_len", offsetof(struct iovec, iov_len), spdk_json_decode_uint64},
523 };
524 
525 static size_t
526 parse_iov_struct(struct iovec *iovec, struct spdk_json_val *value)
527 {
528 	int rc;
529 
530 	if (value->type != SPDK_JSON_VAL_OBJECT_BEGIN) {
531 		return -1;
532 	}
533 
534 	rc = spdk_json_decode_object(value,
535 				     fuzz_vhost_iov_decoders,
536 				     SPDK_COUNTOF(fuzz_vhost_iov_decoders),
537 				     iovec);
538 	if (rc) {
539 		return -1;
540 	}
541 
542 	while (value->type != SPDK_JSON_VAL_OBJECT_END) {
543 		value++;
544 		rc++;
545 	}
546 
547 	/* The +1 instructs the calling function to skip over the OBJECT_END function. */
548 	rc += 1;
549 	return rc;
550 }
551 
552 static bool
553 parse_vhost_blk_cmds(void *item, struct spdk_json_val *value, size_t num_values)
554 {
555 	struct fuzz_vhost_io_ctx *io_ctx = item;
556 	struct spdk_json_val *prev_value;
557 	int nested_object_size;
558 	uint64_t tmp_val;
559 	size_t i = 0;
560 
561 	while (i < num_values) {
562 		nested_object_size = 1;
563 		if (value->type == SPDK_JSON_VAL_NAME) {
564 			prev_value = value;
565 			value++;
566 			i++;
567 			if (!strncmp(prev_value->start, "req_iov", prev_value->len)) {
568 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_req, value);
569 			} else if (!strncmp(prev_value->start, "data_iov", prev_value->len)) {
570 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_data, value);
571 			} else if (!strncmp(prev_value->start, "resp_iov", prev_value->len)) {
572 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_data, value);
573 			} else if (!strncmp(prev_value->start, "type", prev_value->len)) {
574 				if (fuzz_parse_json_num(value, UINT32_MAX, &tmp_val)) {
575 					nested_object_size = -1;
576 				} else {
577 					io_ctx->req.blk_req.type = tmp_val;
578 				}
579 			} else if (!strncmp(prev_value->start, "ioprio", prev_value->len)) {
580 				if (fuzz_parse_json_num(value, UINT32_MAX, &tmp_val)) {
581 					nested_object_size = -1;
582 				} else {
583 					io_ctx->req.blk_req.ioprio = tmp_val;
584 				}
585 			} else if (!strncmp(prev_value->start, "sector", prev_value->len)) {
586 				if (fuzz_parse_json_num(value, UINT64_MAX, &tmp_val)) {
587 					nested_object_size = -1;
588 				} else {
589 					io_ctx->req.blk_req.sector = tmp_val;
590 				}
591 			}
592 		}
593 		if (nested_object_size < 0) {
594 			fprintf(stderr, "Invalid value supplied for io_ctx->%.*s: %.*s\n", prev_value->len,
595 				(char *)prev_value->start, value->len, (char *)value->start);
596 			return false;
597 		}
598 		value += nested_object_size;
599 		i += nested_object_size;
600 	}
601 	return true;
602 }
603 
604 static bool
605 parse_vhost_scsi_cmds(void *item, struct spdk_json_val *value, size_t num_values)
606 {
607 	struct fuzz_vhost_io_ctx *io_ctx = item;
608 	struct spdk_json_val *prev_value;
609 	int nested_object_size;
610 	uint64_t tmp_val;
611 	size_t i = 0;
612 
613 	while (i < num_values) {
614 		nested_object_size = 1;
615 		if (value->type == SPDK_JSON_VAL_NAME) {
616 			prev_value = value;
617 			value++;
618 			i++;
619 			if (!strncmp(prev_value->start, "req_iov", prev_value->len)) {
620 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_req, value);
621 			} else if (!strncmp(prev_value->start, "data_iov", prev_value->len)) {
622 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_data, value);
623 			} else if (!strncmp(prev_value->start, "resp_iov", prev_value->len)) {
624 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_data, value);
625 			} else if (!strncmp(prev_value->start, "lun", prev_value->len)) {
626 				if (fuzz_get_base_64_buffer_value(&io_ctx->req.scsi_req.lun,
627 								  sizeof(io_ctx->req.scsi_req.lun),
628 								  (char *)value->start,
629 								  value->len)) {
630 					nested_object_size = -1;
631 				}
632 			} else if (!strncmp(prev_value->start, "tag", prev_value->len)) {
633 				if (fuzz_parse_json_num(value, UINT64_MAX, &tmp_val)) {
634 					nested_object_size = -1;
635 				} else {
636 					io_ctx->req.scsi_req.tag = tmp_val;
637 				}
638 			} else if (!strncmp(prev_value->start, "task_attr", prev_value->len)) {
639 				if (fuzz_parse_json_num(value, UINT8_MAX, &tmp_val)) {
640 					nested_object_size = -1;
641 				} else {
642 					io_ctx->req.scsi_req.task_attr = tmp_val;
643 				}
644 			} else if (!strncmp(prev_value->start, "prio", prev_value->len)) {
645 				if (fuzz_parse_json_num(value, UINT8_MAX, &tmp_val)) {
646 					nested_object_size = -1;
647 				} else {
648 					io_ctx->req.scsi_req.prio = tmp_val;
649 				}
650 			} else if (!strncmp(prev_value->start, "crn", prev_value->len)) {
651 				if (fuzz_parse_json_num(value, UINT8_MAX, &tmp_val)) {
652 					nested_object_size = -1;
653 				} else {
654 					io_ctx->req.scsi_req.crn = tmp_val;
655 				}
656 			} else if (!strncmp(prev_value->start, "cdb", prev_value->len)) {
657 				if (fuzz_get_base_64_buffer_value(&io_ctx->req.scsi_req.cdb,
658 								  sizeof(io_ctx->req.scsi_req.cdb),
659 								  (char *)value->start,
660 								  value->len)) {
661 					nested_object_size = -1;
662 				}
663 			}
664 		}
665 		if (nested_object_size < 0) {
666 			fprintf(stderr, "Invalid value supplied for io_ctx->%.*s: %.*s\n", prev_value->len,
667 				(char *)prev_value->start, value->len, (char *)value->start);
668 			return false;
669 		}
670 		value += nested_object_size;
671 		i += nested_object_size;
672 	}
673 	return true;
674 
675 }
676 
677 static bool
678 parse_vhost_scsi_mgmt_cmds(void *item, struct spdk_json_val *value, size_t num_values)
679 {
680 	struct fuzz_vhost_io_ctx *io_ctx = item;
681 	struct spdk_json_val *prev_value;
682 	int nested_object_size;
683 	uint64_t tmp_val;
684 	size_t i = 0;
685 
686 	while (i < num_values) {
687 		nested_object_size = 1;
688 		if (value->type == SPDK_JSON_VAL_NAME) {
689 			prev_value = value;
690 			value++;
691 			i++;
692 			if (!strncmp(prev_value->start, "req_iov", prev_value->len)) {
693 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_req, value);
694 			} else if (!strncmp(prev_value->start, "data_iov", prev_value->len)) {
695 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_data, value);
696 			} else if (!strncmp(prev_value->start, "resp_iov", prev_value->len)) {
697 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_data, value);
698 			} else if (!strncmp(prev_value->start, "type", prev_value->len)) {
699 				if (fuzz_parse_json_num(value, UINT32_MAX, &tmp_val)) {
700 					nested_object_size = -1;
701 				} else {
702 					io_ctx->req.scsi_tmf_req.type = tmp_val;
703 				}
704 			} else if (!strncmp(prev_value->start, "subtype", prev_value->len)) {
705 				if (fuzz_parse_json_num(value, UINT32_MAX, &tmp_val)) {
706 					nested_object_size = -1;
707 				} else {
708 					io_ctx->req.scsi_tmf_req.subtype = tmp_val;
709 				}
710 			}  else if (!strncmp(prev_value->start, "lun", prev_value->len)) {
711 				if (fuzz_get_base_64_buffer_value(&io_ctx->req.scsi_tmf_req.lun,
712 								  sizeof(io_ctx->req.scsi_tmf_req.lun),
713 								  (char *)value->start,
714 								  value->len)) {
715 					nested_object_size = -1;
716 				}
717 			} else if (!strncmp(prev_value->start, "tag", prev_value->len)) {
718 				if (fuzz_parse_json_num(value, UINT64_MAX, &tmp_val)) {
719 					nested_object_size = -1;
720 				} else {
721 					io_ctx->req.scsi_tmf_req.tag = tmp_val;
722 				}
723 			}
724 		}
725 		if (nested_object_size < 0) {
726 			fprintf(stderr, "Invalid value supplied for io_ctx->%.*s: %.*s\n", prev_value->len,
727 				(char *)prev_value->start, value->len, (char *)value->start);
728 			return false;
729 		}
730 		value += nested_object_size;
731 		i += nested_object_size;
732 	}
733 	return true;
734 }
735 /* data parsing functions end */
736 
737 /* build requests begin */
738 static void
739 craft_io_from_array(struct fuzz_vhost_io_ctx *src_ctx, struct fuzz_vhost_io_ctx *dest_ctx)
740 {
741 	if (g_keep_iov_pointers) {
742 		dest_ctx->iovs = src_ctx->iovs;
743 	}
744 	dest_ctx->req = src_ctx->req;
745 }
746 
/* Fill a SCSI command request with random bytes; optionally force a LUN
 * address that the target should recognize. */
static void
craft_virtio_scsi_req(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
{
	io_ctx->iovs.iov_req.iov_len = sizeof(io_ctx->req.scsi_req);
	io_ctx->iovs.iov_resp.iov_len = sizeof(io_ctx->resp.scsi_resp);
	fuzz_fill_random_bytes((char *)&io_ctx->req.scsi_req, sizeof(io_ctx->req.scsi_req),
			       &dev_ctx->random_seed);
	/* TODO: set up the logic to find all luns on the target. Right now we are just assuming the first is OK. */
	if (dev_ctx->valid_lun) {
		io_ctx->req.scsi_req.lun[0] = 1;
		io_ctx->req.scsi_req.lun[1] = 0;
	}
}
760 
/* Fill a SCSI task-management request with random bytes; optionally force
 * a recognizable LUN, and bias the type field toward valid opcodes. */
static void
craft_virtio_scsi_tmf_req(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
{
	io_ctx->iovs.iov_req.iov_len = sizeof(io_ctx->req.scsi_tmf_req);
	io_ctx->iovs.iov_resp.iov_len = sizeof(io_ctx->resp.scsi_tmf_resp);
	fuzz_fill_random_bytes((char *)&io_ctx->req.scsi_tmf_req, sizeof(io_ctx->req.scsi_tmf_req),
			       &dev_ctx->random_seed);
	/* TODO: set up the logic to find all luns on the target. Right now we are just assuming the first is OK. */
	if (dev_ctx->valid_lun) {
		io_ctx->req.scsi_tmf_req.lun[0] = 1;
		io_ctx->req.scsi_tmf_req.lun[1] = 0;
	}

	/* Valid controlqueue commands have to be of type 0, 1, or 2. Any others just return immediately from the target. */
	/* Try to only test the opcodes that will exercise extra paths in the target side. But allow for at least one invalid value. */
	io_ctx->req.scsi_tmf_req.type = rand() % 4;
}
778 
/* Fill a virtio-blk request header with random type and sector values;
 * the iov lengths are pinned to the real header/response sizes. */
static void
craft_virtio_blk_req(struct fuzz_vhost_io_ctx *io_ctx)
{
	io_ctx->iovs.iov_req.iov_len = sizeof(io_ctx->req.blk_req);
	io_ctx->iovs.iov_resp.iov_len = sizeof(io_ctx->resp.blk_resp);
	io_ctx->req.blk_req.type = rand();
	io_ctx->req.blk_req.sector = rand();
}
787 
/*
 * Prepare one io context for submission: point the iovecs at the context's
 * own request/response storage (and optionally a bogus or valid data
 * buffer), then fill the request either from a parsed JSON command array
 * or with freshly randomized contents.
 */
static void
craft_virtio_req_rsp_pair(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
{
	struct fuzz_vhost_iov_ctx *iovs = &io_ctx->iovs;

	/*
	 * Always set these buffer values up front.
	 * If the user wants to override this with the json values,
	 * they can specify -k when starting the app. */
	iovs->iov_req.iov_base = &io_ctx->req;
	if (dev_ctx->use_bogus_buffer) {
		iovs->iov_data.iov_len = rand();
		iovs->iov_data.iov_base = get_invalid_mem_address(iovs->iov_data.iov_len);
	} else if (dev_ctx->use_valid_buffer) {
		iovs->iov_data.iov_len = 1024;
		iovs->iov_data.iov_base = g_valid_buffer;
	}
	iovs->iov_resp.iov_base = &io_ctx->resp;

	/* Replay mode: take the next command from the matching parsed array. */
	if (dev_ctx->socket_is_blk && g_blk_cmd_array) {
		craft_io_from_array(&g_blk_cmd_array[dev_ctx->submitted_io], io_ctx);
		return;
	} else if (dev_ctx->test_scsi_tmf && g_scsi_mgmt_cmd_array) {
		craft_io_from_array(&g_scsi_mgmt_cmd_array[dev_ctx->submitted_io], io_ctx);
		return;
	} else if (g_scsi_cmd_array) {
		craft_io_from_array(&g_scsi_cmd_array[dev_ctx->submitted_io], io_ctx);
		return;
	}

	/* Random mode: generate a request matching the device's traffic type. */
	if (dev_ctx->socket_is_blk) {
		craft_virtio_blk_req(io_ctx);
	} else if (dev_ctx->test_scsi_tmf) {
		craft_virtio_scsi_tmf_req(dev_ctx, io_ctx);
	} else {
		craft_virtio_scsi_req(dev_ctx, io_ctx);
	}
}
826 /* build requests end */
827 
828 /* submit requests begin */
829 static uint64_t
830 get_max_num_io(struct fuzz_vhost_dev_ctx *dev_ctx)
831 {
832 	if (dev_ctx->socket_is_blk) {
833 		return g_blk_cmd_array_size;
834 	} else if (dev_ctx->test_scsi_tmf) {
835 		return g_scsi_mgmt_cmd_array_size;
836 	} else {
837 		return g_scsi_cmd_array_size;
838 	}
839 }
840 
/*
 * Queue one request on the virtqueue: the read-only request header, an
 * optional data buffer, and the writable response buffer. Returns the
 * error from virtqueue_req_start() (e.g. -ENOMEM when no descriptors are
 * free) or 0 on success.
 */
static int
submit_virtio_req_rsp_pair(struct fuzz_vhost_dev_ctx *dev_ctx, struct virtqueue *vq,
			   struct fuzz_vhost_io_ctx *io_ctx)
{
	struct fuzz_vhost_iov_ctx *iovs = &io_ctx->iovs;
	int num_iovs = 2, rc;

	/* The data iov is only attached when a bogus or valid buffer is in use. */
	num_iovs += dev_ctx->use_bogus_buffer || dev_ctx->use_valid_buffer ? 1 : 0;

	rc = virtqueue_req_start(vq, io_ctx, num_iovs);
	if (rc) {
		return rc;
	}
	virtqueue_req_add_iovs(vq, &iovs->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	/* blk and scsi requests favor different orders for the iov objects. */
	if (dev_ctx->socket_is_blk) {
		if (dev_ctx->use_bogus_buffer || dev_ctx->use_valid_buffer) {
			virtqueue_req_add_iovs(vq, &iovs->iov_data, 1, SPDK_VIRTIO_DESC_WR);
		}
		virtqueue_req_add_iovs(vq, &iovs->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
	} else {
		virtqueue_req_add_iovs(vq, &iovs->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
		if (dev_ctx->use_bogus_buffer || dev_ctx->use_valid_buffer) {
			virtqueue_req_add_iovs(vq, &iovs->iov_data, 1, SPDK_VIRTIO_DESC_WR);
		}
	}
	virtqueue_req_flush(vq);
	return 0;
}
870 
/*
 * Submit requests until the free list is exhausted, the submission cap is
 * reached, or the virtqueue reports an error. Successfully submitted
 * contexts move to the outstanding list.
 */
static void
dev_submit_requests(struct fuzz_vhost_dev_ctx *dev_ctx, struct virtqueue *vq,
		    uint64_t max_io_to_submit)
{
	struct fuzz_vhost_io_ctx *io_ctx;
	int rc;

	while (!TAILQ_EMPTY(&dev_ctx->free_io_ctx) && dev_ctx->submitted_io < max_io_to_submit) {
		io_ctx = TAILQ_FIRST(&dev_ctx->free_io_ctx);
		craft_virtio_req_rsp_pair(dev_ctx, io_ctx);
		rc = submit_virtio_req_rsp_pair(dev_ctx, vq, io_ctx);
		if (rc == 0) {
			TAILQ_REMOVE(&dev_ctx->free_io_ctx, io_ctx, link);
			TAILQ_INSERT_TAIL(&dev_ctx->outstanding_io_ctx, io_ctx, link);
			dev_ctx->submitted_io++;
		} else if (rc == -ENOMEM) {
			/* There are just not enough available buffers right now. try later. */
			return;
		} else if (rc == -EINVAL) {
			/* The virtqueue must be broken. We know we can fit at least three descriptors */
			fprintf(stderr, "One of the virtqueues for dev %p is broken. stopping all devices.\n", dev_ctx);
			g_run = 0;
		}
	}
}
896 /* submit requests end */
897 
898 /* complete requests begin */
/*
 * Inspect the response of a completed command. For a fuzzer, a zero
 * (success) status is the interesting case — the target accepted a
 * randomized request — so it is logged and counted.
 */
static void
check_successful_op(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
{
	bool is_successful = false;

	if (dev_ctx->socket_is_blk) {
		if (io_ctx->resp.blk_resp == 0) {
			is_successful = true;
		}
	} else if (dev_ctx->test_scsi_tmf) {
		/* The request type is random, so check the response through both
		 * union views (tmf and async-notification). */
		if (io_ctx->resp.scsi_tmf_resp.scsi_tmf_resp.response == 0 &&
		    io_ctx->resp.scsi_tmf_resp.an_resp.response == 0) {
			is_successful = true;
		}
	} else {
		if (io_ctx->resp.scsi_resp.status == 0) {
			is_successful = true;
		}
	}

	if (is_successful) {
		fprintf(stderr, "An I/O completed without an error status. This could be worth looking into.\n");
		fprintf(stderr,
			"There is also a good chance that the target just failed before setting a status.\n");
		dev_ctx->successful_io++;
		print_req_obj(dev_ctx, io_ctx);
	} else if (g_verbose_mode) {
		fprintf(stderr, "The following I/O failed as expected.\n");
		print_req_obj(dev_ctx, io_ctx);
	}
}
930 
/* Recycle a completed context onto the free list, account for the
 * completion, and push the watchdog deadline out. */
static void
complete_io(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
{
	TAILQ_REMOVE(&dev_ctx->outstanding_io_ctx, io_ctx, link);
	TAILQ_INSERT_HEAD(&dev_ctx->free_io_ctx, io_ctx, link);
	check_successful_op(dev_ctx, io_ctx);
	dev_ctx->completed_io++;
	dev_ctx->timeout_tsc = fuzz_refresh_timeout();
}
940 
/*
 * Per-device poller: reap completions from the virtqueue, detect timeout or
 * end-of-run conditions, and (while the run continues) submit fresh fuzzed
 * requests. Returns 0 on every invocation (SPDK poller convention).
 */
static int
poll_dev(void *ctx)
{
	struct fuzz_vhost_dev_ctx *dev_ctx = ctx;
	struct virtqueue *vq;
	struct fuzz_vhost_io_ctx *io_ctx[FUZZ_QUEUE_DEPTH];
	int num_active_threads;
	uint64_t max_io_to_complete = UINT64_MAX;
	uint64_t current_ticks;
	uint32_t len[FUZZ_QUEUE_DEPTH];
	uint16_t num_cpl, i;

	/* When replaying from a json file, stop after the supplied commands are exhausted. */
	if (g_json_file) {
		max_io_to_complete = get_max_num_io(dev_ctx);
	}

	/* SCSI task-management requests go over the control queue; everything else over the request queue. */
	if (!dev_ctx->socket_is_blk && dev_ctx->test_scsi_tmf) {
		vq = dev_ctx->virtio_dev.vqs[VIRTIO_SCSI_CONTROLQ];
	} else {
		vq = dev_ctx->virtio_dev.vqs[VIRTIO_REQUESTQ];
	}

	num_cpl = virtio_recv_pkts(vq, (void **)io_ctx, len, FUZZ_QUEUE_DEPTH);

	for (i = 0; i < num_cpl; i++) {
		complete_io(dev_ctx, io_ctx[i]);
	}

	current_ticks = spdk_get_ticks();

	/* No completion refreshed timeout_tsc in time: assume the target hung and stop the whole run. */
	if (current_ticks > dev_ctx->timeout_tsc) {
		dev_ctx->timed_out = true;
		g_run = false;
		fprintf(stderr, "The VQ on device %p timed out. Dumping contents now.\n", dev_ctx);
		dump_outstanding_io(dev_ctx);
	}

	/* Overall test duration elapsed. */
	if (current_ticks > g_runtime_ticks) {
		g_run = 0;
	}

	if (!g_run || dev_ctx->completed_io >= max_io_to_complete) {
		/* Drain: keep polling until all outstanding I/O has come back before tearing down. */
		if (TAILQ_EMPTY(&dev_ctx->outstanding_io_ctx)) {
			spdk_poller_unregister(&dev_ctx->poller);
			/* Last device thread to exit clears g_run so end_fuzz can stop the app. */
			num_active_threads = __sync_sub_and_fetch(&g_num_active_threads, 1);
			if (num_active_threads == 0) {
				g_run = 0;
			}
			spdk_thread_exit(dev_ctx->thread);
		}
		return 0;
	}

	dev_submit_requests(dev_ctx, vq, max_io_to_complete);
	return 0;
}
997 /* complete requests end */
998 
999 static void
1000 start_io(void *ctx)
1001 {
1002 	struct fuzz_vhost_dev_ctx *dev_ctx = ctx;
1003 
1004 	if (g_random_seed) {
1005 		dev_ctx->random_seed = g_random_seed;
1006 	} else {
1007 		dev_ctx->random_seed = spdk_get_ticks();
1008 	}
1009 
1010 	dev_ctx->timeout_tsc = fuzz_refresh_timeout();
1011 
1012 	dev_ctx->poller = SPDK_POLLER_REGISTER(poll_dev, dev_ctx, 0);
1013 	if (dev_ctx->poller == NULL) {
1014 		return;
1015 	}
1016 
1017 }
1018 
1019 static int
1020 end_fuzz(void *ctx)
1021 {
1022 	if (!g_run && !g_num_active_threads) {
1023 		spdk_poller_unregister(&g_run_poller);
1024 		cleanup();
1025 		spdk_app_stop(0);
1026 	}
1027 	return 0;
1028 }
1029 
1030 static void
1031 begin_fuzz(void *ctx)
1032 {
1033 	struct fuzz_vhost_dev_ctx *dev_ctx;
1034 
1035 	g_runtime_ticks = spdk_get_ticks() + spdk_get_ticks_hz() * g_runtime;
1036 
1037 	g_valid_buffer = spdk_malloc(0x1000, 0x200, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_SHARE);
1038 	if (g_valid_buffer == NULL) {
1039 		fprintf(stderr, "Failed to allocate a valid buffer for I/O\n");
1040 		goto out;
1041 	}
1042 
1043 	g_run_poller = SPDK_POLLER_REGISTER(end_fuzz, NULL, 0);
1044 	if (g_run_poller == NULL) {
1045 		fprintf(stderr, "Failed to register a poller for test completion checking.\n");
1046 	}
1047 
1048 	TAILQ_FOREACH(dev_ctx, &g_dev_list, link) {
1049 		assert(dev_ctx->thread != NULL);
1050 		spdk_thread_send_msg(dev_ctx->thread, start_io, dev_ctx);
1051 		__sync_add_and_fetch(&g_num_active_threads, 1);
1052 	}
1053 
1054 	return;
1055 out:
1056 	cleanup();
1057 	spdk_app_stop(0);
1058 }
1059 
/* Print the app-specific command line options (called by spdk_app_parse_args on -h / bad args). */
static void
fuzz_vhost_usage(void)
{
	fprintf(stderr,
		" -j <path>                 Path to a json file containing named objects.\n"
		" -k                        Keep the iov pointer addresses from the json file. only valid with -j.\n"
		" -S <integer>              Seed value for test.\n"
		" -t <integer>              Time in seconds to run the fuzz test.\n"
		" -V                        Enable logging of each submitted command.\n");
}
1070 
1071 static int
1072 fuzz_vhost_parse(int ch, char *arg)
1073 {
1074 	int64_t error_test;
1075 
1076 	switch (ch) {
1077 	case 'j':
1078 		g_json_file = optarg;
1079 		break;
1080 	case 'k':
1081 		g_keep_iov_pointers = true;
1082 		break;
1083 	case 'S':
1084 		error_test = spdk_strtol(arg, 10);
1085 		if (error_test < 0) {
1086 			fprintf(stderr, "Invalid value supplied for the random seed.\n");
1087 			return -1;
1088 		} else {
1089 			g_random_seed = spdk_strtol(arg, 10);
1090 		}
1091 		break;
1092 	case 't':
1093 		g_runtime = spdk_strtol(arg, 10);
1094 		if (g_runtime < 0 || g_runtime > MAX_RUNTIME_S) {
1095 			fprintf(stderr, "You must supply a positive runtime value less than 86401.\n");
1096 			return -1;
1097 		}
1098 		break;
1099 	case 'V':
1100 		g_verbose_mode = true;
1101 		break;
1102 	case '?':
1103 	default:
1104 		return -EINVAL;
1105 	}
1106 	return 0;
1107 }
1108 
1109 int
1110 main(int argc, char **argv)
1111 {
1112 	struct spdk_app_opts opts = {};
1113 	int rc;
1114 
1115 	spdk_app_opts_init(&opts, sizeof(opts));
1116 	opts.name = "vhost_fuzz";
1117 	g_runtime = DEFAULT_RUNTIME;
1118 
1119 	rc = spdk_app_parse_args(argc, argv, &opts, "j:kS:t:V", NULL, fuzz_vhost_parse, fuzz_vhost_usage);
1120 	if (rc != SPDK_APP_PARSE_ARGS_SUCCESS) {
1121 		fprintf(stderr, "Unable to parse the application arguments.\n");
1122 		return -1;
1123 	}
1124 
1125 	if (g_json_file != NULL) {
1126 		g_blk_cmd_array_size = fuzz_parse_args_into_array(g_json_file,
1127 				       (void **)&g_blk_cmd_array,
1128 				       sizeof(struct fuzz_vhost_io_ctx),
1129 				       BLK_IO_NAME, parse_vhost_blk_cmds);
1130 		g_scsi_cmd_array_size = fuzz_parse_args_into_array(g_json_file,
1131 					(void **)&g_scsi_cmd_array,
1132 					sizeof(struct fuzz_vhost_io_ctx),
1133 					SCSI_IO_NAME, parse_vhost_scsi_cmds);
1134 		g_scsi_mgmt_cmd_array_size = fuzz_parse_args_into_array(g_json_file,
1135 					     (void **)&g_scsi_mgmt_cmd_array,
1136 					     sizeof(struct fuzz_vhost_io_ctx),
1137 					     SCSI_IO_NAME, parse_vhost_scsi_mgmt_cmds);
1138 		if (g_blk_cmd_array_size == 0 && g_scsi_cmd_array_size == 0 && g_scsi_mgmt_cmd_array_size == 0) {
1139 			fprintf(stderr, "The provided json file did not contain any valid commands. Exiting.\n");
1140 			return -EINVAL;
1141 		}
1142 	}
1143 
1144 	spdk_app_start(&opts, begin_fuzz, NULL);
1145 }
1146