xref: /spdk/test/app/fuzz/vhost_fuzz/vhost_fuzz.c (revision 5db859da082485cbb7a8429d13818da4131da63a)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2019 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 #include "spdk/env.h"
8 #include "spdk/json.h"
9 #include "spdk/event.h"
10 #include "spdk/likely.h"
11 #include "spdk/util.h"
12 #include "spdk/string.h"
13 #include "spdk_internal/virtio.h"
14 #include "spdk_internal/vhost_user.h"
15 
16 #include "fuzz_common.h"
17 #include "vhost_fuzz.h"
18 
19 #include <linux/virtio_blk.h>
20 #include <linux/virtio_scsi.h>
21 
/* Features desired/implemented by virtio blk. */
#define VIRTIO_BLK_DEV_SUPPORTED_FEATURES		\
	(1ULL << VIRTIO_BLK_F_BLK_SIZE		|	\
	 1ULL << VIRTIO_BLK_F_TOPOLOGY		|	\
	 1ULL << VIRTIO_BLK_F_MQ		|	\
	 1ULL << VIRTIO_BLK_F_RO		|	\
	 1ULL << VIRTIO_BLK_F_DISCARD		|	\
	 1ULL << VIRTIO_RING_F_EVENT_IDX	|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

/* Features desired/implemented by virtio scsi. */
#define VIRTIO_SCSI_DEV_SUPPORTED_FEATURES		\
	(1ULL << VIRTIO_SCSI_F_INOUT		|	\
	 1ULL << VIRTIO_SCSI_F_HOTPLUG		|	\
	 1ULL << VIRTIO_RING_F_EVENT_IDX	|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

/* On a virtio-scsi device the controlq and eventq occupy the first two
 * (fixed) queue slots; the request queue used for fuzzed I/O follows. */
#define VIRTIO_DEV_FIXED_QUEUES	2
#define VIRTIO_SCSI_CONTROLQ	0
#define VIRTIO_SCSI_EVENTQ	1
#define VIRTIO_REQUESTQ		2
#define FUZZ_MAX_QUEUES		3

/* Number of preallocated I/O contexts (and max outstanding I/O) per device. */
#define FUZZ_QUEUE_DEPTH	128

/* JSON object names used when dumping and when parsing replay commands. */
#define BLK_IO_NAME		"vhost_blk_cmd"
#define SCSI_IO_NAME		"vhost_scsi_cmd"
#define SCSI_MGMT_NAME		"vhost_scsi_mgmt_cmd"
50 
/* The three descriptors submitted with each fuzzed request: the request
 * header, an optional data buffer, and the response buffer. */
struct fuzz_vhost_iov_ctx {
	struct iovec			iov_req;
	struct iovec			iov_data;
	struct iovec			iov_resp;
};
56 
/* Per-I/O state: iovecs plus request/response storage for whichever command
 * flavor (blk, scsi, or scsi management) this context carries. */
struct fuzz_vhost_io_ctx {
	struct fuzz_vhost_iov_ctx		iovs;
	union {
		struct virtio_blk_outhdr	blk_req;
		struct virtio_scsi_cmd_req	scsi_req;
		struct virtio_scsi_ctrl_tmf_req	scsi_tmf_req;
	} req;
	union {
		uint8_t					blk_resp;
		struct virtio_scsi_cmd_resp		scsi_resp;
		/* Management responses: the TMF and async-notification
		 * layouts overlap in the same storage. */
		union {
			struct virtio_scsi_ctrl_tmf_resp	scsi_tmf_resp;
			struct virtio_scsi_ctrl_an_resp		an_resp;
		} scsi_tmf_resp;
	} resp;

	/* Linkage for the per-device free/outstanding lists. */
	TAILQ_ENTRY(fuzz_vhost_io_ctx) link;
};
75 
/* Per-device fuzzing context: one vhost-user socket, its own SPDK thread
 * and poller, a pool of I/O contexts, and run statistics. */
struct fuzz_vhost_dev_ctx {
	struct virtio_dev			virtio_dev;
	struct spdk_thread			*thread;
	struct spdk_poller			*poller;

	/* Backing storage for all I/O contexts; entries migrate between
	 * the free and outstanding lists below. */
	struct fuzz_vhost_io_ctx		*io_ctx_array;
	TAILQ_HEAD(, fuzz_vhost_io_ctx)		free_io_ctx;
	TAILQ_HEAD(, fuzz_vhost_io_ctx)		outstanding_io_ctx;

	/* Seed for rand_r-style request randomization. */
	unsigned int				random_seed;

	uint64_t				submitted_io;
	uint64_t				completed_io;
	uint64_t				successful_io;
	/* tsc deadline; refreshed on every completion (see complete_io). */
	uint64_t				timeout_tsc;

	/* Mode flags chosen at device creation time. */
	bool					socket_is_blk;
	bool					test_scsi_tmf;
	bool					valid_lun;
	bool					use_bogus_buffer;
	bool					use_valid_buffer;
	bool					timed_out;

	TAILQ_ENTRY(fuzz_vhost_dev_ctx)	link;
};
101 
/* Global run state */
uint64_t				g_runtime_ticks;	/* tsc value at which the whole run ends */
int					g_runtime;
int					g_num_active_threads;
bool					g_run = true;		/* cleared to stop all devices */
bool					g_verbose_mode = false;

/* Global resources */
TAILQ_HEAD(, fuzz_vhost_dev_ctx)	g_dev_list = TAILQ_HEAD_INITIALIZER(g_dev_list);
struct spdk_poller			*g_run_poller;
void					*g_valid_buffer;	/* shared buffer for valid-buffer mode */
unsigned int				g_random_seed;


/* Global parameters and resources for parsed commands */
bool					g_keep_iov_pointers = false;	/* -k: also replay iovecs from JSON */
char					*g_json_file = NULL;
struct fuzz_vhost_io_ctx		*g_blk_cmd_array = NULL;
struct fuzz_vhost_io_ctx		*g_scsi_cmd_array = NULL;
struct fuzz_vhost_io_ctx		*g_scsi_mgmt_cmd_array = NULL;

size_t					g_blk_cmd_array_size;
size_t					g_scsi_cmd_array_size;
size_t					g_scsi_mgmt_cmd_array_size;
126 
127 static void
cleanup(void)128 cleanup(void)
129 {
130 	struct fuzz_vhost_dev_ctx *dev_ctx, *tmp;
131 	printf("Fuzzing completed.\n");
132 	TAILQ_FOREACH_SAFE(dev_ctx, &g_dev_list, link, tmp) {
133 		printf("device %p stats: Completed I/O: %lu, Successful I/O: %lu\n", dev_ctx,
134 		       dev_ctx->completed_io, dev_ctx->successful_io);
135 		virtio_dev_release_queue(&dev_ctx->virtio_dev, VIRTIO_REQUESTQ);
136 		if (!dev_ctx->socket_is_blk) {
137 			virtio_dev_release_queue(&dev_ctx->virtio_dev, VIRTIO_SCSI_EVENTQ);
138 			virtio_dev_release_queue(&dev_ctx->virtio_dev, VIRTIO_SCSI_CONTROLQ);
139 		}
140 		virtio_dev_stop(&dev_ctx->virtio_dev);
141 		virtio_dev_destruct(&dev_ctx->virtio_dev);
142 		if (dev_ctx->io_ctx_array) {
143 			spdk_free(dev_ctx->io_ctx_array);
144 		}
145 		free(dev_ctx);
146 	}
147 
148 	spdk_free(g_valid_buffer);
149 
150 	if (g_blk_cmd_array) {
151 		free(g_blk_cmd_array);
152 	}
153 	if (g_scsi_cmd_array) {
154 		free(g_scsi_cmd_array);
155 	}
156 	if (g_scsi_mgmt_cmd_array) {
157 		free(g_scsi_mgmt_cmd_array);
158 	}
159 }
160 
161 /* Get a memory address that is random and not located in our hugepage memory. */
162 static void *
get_invalid_mem_address(uint64_t length)163 get_invalid_mem_address(uint64_t length)
164 {
165 	uint64_t chosen_address = 0x0;
166 
167 	while (true) {
168 		chosen_address = rand();
169 		chosen_address = (chosen_address << 32) | rand();
170 		if (spdk_vtophys((void *)chosen_address, &length) == SPDK_VTOPHYS_ERROR) {
171 			return (void *)chosen_address;
172 		}
173 	}
174 	return NULL;
175 }
176 
/* dev initialization code begin. */
/*
 * Initialize a virtio-user device over the given vhost-user socket,
 * negotiate the supplied feature flags, start it with max_queues queues
 * (the first VIRTIO_DEV_FIXED_QUEUES being fixed), and claim the request
 * queue for I/O. Returns 0 on success, a negative/non-zero code on failure.
 */
static int
virtio_dev_init(struct virtio_dev *vdev, const char *socket_path, uint64_t flags,
		uint16_t max_queues)
{
	int rc;

	/* 1024 is the virtqueue size requested from the target. */
	rc = virtio_user_dev_init(vdev, "dev_ctx", socket_path, 1024);
	if (rc != 0) {
		fprintf(stderr, "Failed to initialize virtual bdev\n");
		return rc;
	}

	rc = virtio_dev_reset(vdev, flags);
	if (rc != 0) {
		/* NOTE(review): vdev is not destructed on this or the next
		 * failure path; presumably acceptable since the app exits on
		 * init failure — confirm. */
		return rc;
	}

	rc = virtio_dev_start(vdev, max_queues, VIRTIO_DEV_FIXED_QUEUES);
	if (rc != 0) {
		return rc;
	}

	rc = virtio_dev_acquire_queue(vdev, VIRTIO_REQUESTQ);
	if (rc < 0) {
		fprintf(stderr, "Couldn't get an unused queue for the io_channel.\n");
		virtio_dev_stop(vdev);
		return rc;
	}
	return 0;
}
208 
209 static int
blk_dev_init(struct virtio_dev * vdev,const char * socket_path,uint16_t max_queues)210 blk_dev_init(struct virtio_dev *vdev, const char *socket_path, uint16_t max_queues)
211 {
212 	return virtio_dev_init(vdev, socket_path, VIRTIO_BLK_DEV_SUPPORTED_FEATURES, max_queues);
213 }
214 
/*
 * Initialize a vhost-scsi device and additionally claim the fixed controlq
 * and eventq. The controlq is released again if the eventq cannot be
 * acquired, so failure leaves no queues held by this function.
 */
static int
scsi_dev_init(struct virtio_dev *vdev, const char *socket_path, uint16_t max_queues)
{
	int rc;

	rc = virtio_dev_init(vdev, socket_path, VIRTIO_SCSI_DEV_SUPPORTED_FEATURES, max_queues);
	if (rc != 0) {
		return rc;
	}

	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_CONTROLQ);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to acquire the controlq.\n");
		return rc;
	}

	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_EVENTQ);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to acquire the eventq.\n");
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
		return rc;
	}

	return 0;
}
240 
/*
 * Create one fuzz device bound to a vhost-user socket: allocate its context
 * and shared-memory I/O pool, spawn its SPDK thread, and initialize the
 * underlying virtio blk or scsi device. On success the context is appended
 * to g_dev_list. Returns 0 or a negative errno-style value.
 */
int
fuzz_vhost_dev_init(const char *socket_path, bool is_blk_dev, bool use_bogus_buffer,
		    bool use_valid_buffer, bool valid_lun, bool test_scsi_tmf)
{
	struct fuzz_vhost_dev_ctx *dev_ctx;
	int rc = 0, i;

	dev_ctx = calloc(1, sizeof(*dev_ctx));
	if (dev_ctx == NULL) {
		return -ENOMEM;
	}

	dev_ctx->socket_is_blk = is_blk_dev;
	dev_ctx->use_bogus_buffer = use_bogus_buffer;
	dev_ctx->use_valid_buffer = use_valid_buffer;
	dev_ctx->valid_lun = valid_lun;
	dev_ctx->test_scsi_tmf = test_scsi_tmf;

	TAILQ_INIT(&dev_ctx->free_io_ctx);
	TAILQ_INIT(&dev_ctx->outstanding_io_ctx);

	/* The I/O contexts are visible to the vhost target, so they come from
	 * shared (SPDK_MALLOC_SHARE) memory rather than the regular heap. */
	assert(sizeof(*dev_ctx->io_ctx_array) <= UINT64_MAX / FUZZ_QUEUE_DEPTH);
	dev_ctx->io_ctx_array = spdk_malloc(sizeof(*dev_ctx->io_ctx_array) * FUZZ_QUEUE_DEPTH, 0x0, NULL,
					    SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_SHARE);
	if (dev_ctx->io_ctx_array == NULL) {
		free(dev_ctx);
		return -ENOMEM;
	}

	for (i = 0; i < FUZZ_QUEUE_DEPTH; i++) {
		TAILQ_INSERT_HEAD(&dev_ctx->free_io_ctx, &dev_ctx->io_ctx_array[i], link);
	}

	dev_ctx->thread = spdk_thread_create(NULL, NULL);
	if (dev_ctx->thread == NULL) {
		fprintf(stderr, "Unable to allocate a thread for a fuzz device.\n");
		rc = -ENOMEM;
		goto error_out;
	}

	if (is_blk_dev) {
		rc = blk_dev_init(&dev_ctx->virtio_dev, socket_path, FUZZ_MAX_QUEUES);
	} else {
		rc = scsi_dev_init(&dev_ctx->virtio_dev, socket_path, FUZZ_MAX_QUEUES);
	}

	if (rc) {
		fprintf(stderr, "Unable to prepare the device to perform I/O.\n");
		/* NOTE(review): the spdk thread created above is not exited on
		 * this path; presumably fine because the app shuts down on
		 * init failure — confirm. */
		goto error_out;
	}

	TAILQ_INSERT_TAIL(&g_dev_list, dev_ctx, link);
	return 0;

error_out:
	spdk_free(dev_ctx->io_ctx_array);
	free(dev_ctx);
	return rc;
}
300 /* dev initialization code end */
301 
302 /* data dumping functions begin */
/*
 * spdk_json write callback: dump one encoded JSON chunk to stderr.
 * The chunk handed to this callback is `size` bytes and is not guaranteed
 * to be NUL-terminated, so print exactly `size` bytes rather than using
 * a bare "%s" (which could read past the buffer).
 */
static int
dump_virtio_cmd(void *ctx, const void *data, size_t size)
{
	(void)ctx;	/* unused */

	fprintf(stderr, "%.*s\n", (int)size, (const char *)data);
	return 0;
}
309 
/* Emit the virtio-blk request header fields as named JSON values. */
static void
print_blk_io_data(struct spdk_json_write_ctx *w, struct fuzz_vhost_io_ctx *io_ctx)
{
	spdk_json_write_named_uint32(w, "type", io_ctx->req.blk_req.type);
	spdk_json_write_named_uint32(w, "ioprio", io_ctx->req.blk_req.ioprio);
	spdk_json_write_named_uint64(w, "sector", io_ctx->req.blk_req.sector);
}
317 
/* Emit the SCSI task-management request fields; the lun bytes are
 * base64-encoded so they survive the JSON round trip. */
static void
print_scsi_tmf_io_data(struct spdk_json_write_ctx *w, struct fuzz_vhost_io_ctx *io_ctx)
{
	char *lun_data;

	/* fuzz_get_value_base_64_buffer() allocates; freed below. */
	lun_data = fuzz_get_value_base_64_buffer(io_ctx->req.scsi_tmf_req.lun,
			sizeof(io_ctx->req.scsi_tmf_req.lun));

	spdk_json_write_named_uint32(w, "type", io_ctx->req.scsi_tmf_req.type);
	spdk_json_write_named_uint32(w, "subtype", io_ctx->req.scsi_tmf_req.subtype);
	spdk_json_write_named_string(w, "lun", lun_data);
	spdk_json_write_named_uint64(w, "tag", io_ctx->req.scsi_tmf_req.tag);

	free(lun_data);
}
333 
/* Emit the SCSI command request fields; lun and cdb are base64-encoded. */
static void
print_scsi_io_data(struct spdk_json_write_ctx *w, struct fuzz_vhost_io_ctx *io_ctx)
{
	char *lun_data;
	char *cdb_data;

	/* Both helpers allocate; freed below. */
	lun_data = fuzz_get_value_base_64_buffer(io_ctx->req.scsi_req.lun,
			sizeof(io_ctx->req.scsi_req.lun));
	cdb_data = fuzz_get_value_base_64_buffer(io_ctx->req.scsi_req.cdb,
			sizeof(io_ctx->req.scsi_req.cdb));

	spdk_json_write_named_string(w, "lun", lun_data);
	spdk_json_write_named_uint64(w, "tag", io_ctx->req.scsi_req.tag);
	spdk_json_write_named_uint32(w, "task_attr", io_ctx->req.scsi_req.task_attr);
	spdk_json_write_named_uint32(w, "prio", io_ctx->req.scsi_req.prio);
	spdk_json_write_named_uint32(w, "crn", io_ctx->req.scsi_req.crn);
	spdk_json_write_named_string(w, "cdb", cdb_data);

	free(lun_data);
	free(cdb_data);
}
355 
/* Write one iovec as a JSON object: {"<name>": {"iov_base": "<hex>", "iov_len": N}}. */
static void
print_iov_obj(struct spdk_json_write_ctx *w, const char *iov_name, struct iovec *iov)
{
	/* Up to 16 hex digits plus NUL (no "0x" prefix is printed); one spare byte. */
	char hex_addr[19];
	int rc;

	/* PRIxPTR matches uintptr_t exactly; the previous "%lx" was only
	 * correct on platforms where uintptr_t happens to be unsigned long. */
	rc = snprintf(hex_addr, sizeof(hex_addr), "%" PRIxPTR, (uintptr_t)iov->iov_base);

	/* On encoding error or truncation, default to "0". */
	if (rc < 0 || rc >= (int)sizeof(hex_addr)) {
		hex_addr[0] = '0';
		hex_addr[1] = '\0';
	}

	spdk_json_write_named_object_begin(w, iov_name);
	spdk_json_write_named_string(w, "iov_base", hex_addr);
	spdk_json_write_named_uint64(w, "iov_len", iov->iov_len);
	spdk_json_write_object_end(w);
}
376 
377 static void
print_iovs(struct spdk_json_write_ctx * w,struct fuzz_vhost_io_ctx * io_ctx)378 print_iovs(struct spdk_json_write_ctx *w, struct fuzz_vhost_io_ctx *io_ctx)
379 {
380 	print_iov_obj(w, "req_iov", &io_ctx->iovs.iov_req);
381 	print_iov_obj(w, "data_iov", &io_ctx->iovs.iov_data);
382 	print_iov_obj(w, "resp_iov", &io_ctx->iovs.iov_resp);
383 }
384 
385 static void
print_req_obj(struct fuzz_vhost_dev_ctx * dev_ctx,struct fuzz_vhost_io_ctx * io_ctx)386 print_req_obj(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
387 {
388 
389 	struct spdk_json_write_ctx *w;
390 
391 	w = spdk_json_write_begin(dump_virtio_cmd, NULL, SPDK_JSON_WRITE_FLAG_FORMATTED);
392 
393 	if (dev_ctx->socket_is_blk) {
394 		spdk_json_write_named_object_begin(w, BLK_IO_NAME);
395 		print_iovs(w, io_ctx);
396 		print_blk_io_data(w, io_ctx);
397 	} else if (dev_ctx->test_scsi_tmf) {
398 		spdk_json_write_named_object_begin(w, SCSI_MGMT_NAME);
399 		print_iovs(w, io_ctx);
400 		print_scsi_tmf_io_data(w, io_ctx);
401 	} else {
402 		spdk_json_write_named_object_begin(w, SCSI_IO_NAME);
403 		print_iovs(w, io_ctx);
404 		print_scsi_io_data(w, io_ctx);
405 	}
406 	spdk_json_write_object_end(w);
407 	spdk_json_write_end(w);
408 }
409 
410 static void
dump_outstanding_io(struct fuzz_vhost_dev_ctx * dev_ctx)411 dump_outstanding_io(struct fuzz_vhost_dev_ctx *dev_ctx)
412 {
413 	struct fuzz_vhost_io_ctx *io_ctx, *tmp;
414 
415 	TAILQ_FOREACH_SAFE(io_ctx, &dev_ctx->outstanding_io_ctx, link, tmp) {
416 		print_req_obj(dev_ctx, io_ctx);
417 		TAILQ_REMOVE(&dev_ctx->outstanding_io_ctx, io_ctx, link);
418 		TAILQ_INSERT_TAIL(&dev_ctx->free_io_ctx, io_ctx, link);
419 	}
420 }
421 /* data dumping functions end */
422 
423 /* data parsing functions begin */
/*
 * Map an ASCII hex digit to its numeric value (0-15).
 * Returns -1 for any byte that is not a hex digit.
 */
static int
hex_value(uint8_t c)
{
	if (c >= '0' && c <= '9') {
		return c - '0';
	}
	if (c >= 'A' && c <= 'F') {
		return c - 'A' + 0xA;
	}
	if (c >= 'a' && c <= 'f') {
		return c - 'a' + 0xA;
	}
	return -1;
}
438 
439 static int
fuzz_json_decode_hex_uint64(const struct spdk_json_val * val,void * out)440 fuzz_json_decode_hex_uint64(const struct spdk_json_val *val, void *out)
441 {
442 	uint64_t *out_val = out;
443 	size_t i;
444 	char *val_pointer = val->start;
445 	int current_val;
446 
447 	if (val->len > 16) {
448 		return -EINVAL;
449 	}
450 
451 	*out_val = 0;
452 	for (i = 0; i < val->len; i++) {
453 		*out_val = *out_val << 4;
454 		current_val = hex_value(*val_pointer);
455 		if (current_val < 0) {
456 			return -EINVAL;
457 		}
458 		*out_val += current_val;
459 		val_pointer++;
460 	}
461 
462 	return 0;
463 }
464 
/* Decoders for one iovec JSON object: iov_base is a hex string (see
 * fuzz_json_decode_hex_uint64), iov_len a plain uint64. */
static const struct spdk_json_object_decoder fuzz_vhost_iov_decoders[] = {
	{"iov_base", offsetof(struct iovec, iov_base), fuzz_json_decode_hex_uint64},
	{"iov_len", offsetof(struct iovec, iov_len), spdk_json_decode_uint64},
};
469 
470 static size_t
parse_iov_struct(struct iovec * iovec,struct spdk_json_val * value)471 parse_iov_struct(struct iovec *iovec, struct spdk_json_val *value)
472 {
473 	int rc;
474 
475 	if (value->type != SPDK_JSON_VAL_OBJECT_BEGIN) {
476 		return -1;
477 	}
478 
479 	rc = spdk_json_decode_object(value,
480 				     fuzz_vhost_iov_decoders,
481 				     SPDK_COUNTOF(fuzz_vhost_iov_decoders),
482 				     iovec);
483 	if (rc) {
484 		return -1;
485 	}
486 
487 	while (value->type != SPDK_JSON_VAL_OBJECT_END) {
488 		value++;
489 		rc++;
490 	}
491 
492 	/* The +1 instructs the calling function to skip over the OBJECT_END function. */
493 	rc += 1;
494 	return rc;
495 }
496 
497 static bool
parse_vhost_blk_cmds(void * item,struct spdk_json_val * value,size_t num_values)498 parse_vhost_blk_cmds(void *item, struct spdk_json_val *value, size_t num_values)
499 {
500 	struct fuzz_vhost_io_ctx *io_ctx = item;
501 	struct spdk_json_val *prev_value;
502 	int nested_object_size;
503 	uint64_t tmp_val;
504 	size_t i = 0;
505 
506 	while (i < num_values) {
507 		nested_object_size = 1;
508 		if (value->type == SPDK_JSON_VAL_NAME) {
509 			prev_value = value;
510 			value++;
511 			i++;
512 			if (!strncmp(prev_value->start, "req_iov", prev_value->len)) {
513 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_req, value);
514 			} else if (!strncmp(prev_value->start, "data_iov", prev_value->len)) {
515 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_data, value);
516 			} else if (!strncmp(prev_value->start, "resp_iov", prev_value->len)) {
517 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_data, value);
518 			} else if (!strncmp(prev_value->start, "type", prev_value->len)) {
519 				if (fuzz_parse_json_num(value, UINT32_MAX, &tmp_val)) {
520 					nested_object_size = -1;
521 				} else {
522 					io_ctx->req.blk_req.type = tmp_val;
523 				}
524 			} else if (!strncmp(prev_value->start, "ioprio", prev_value->len)) {
525 				if (fuzz_parse_json_num(value, UINT32_MAX, &tmp_val)) {
526 					nested_object_size = -1;
527 				} else {
528 					io_ctx->req.blk_req.ioprio = tmp_val;
529 				}
530 			} else if (!strncmp(prev_value->start, "sector", prev_value->len)) {
531 				if (fuzz_parse_json_num(value, UINT64_MAX, &tmp_val)) {
532 					nested_object_size = -1;
533 				} else {
534 					io_ctx->req.blk_req.sector = tmp_val;
535 				}
536 			}
537 		}
538 		if (nested_object_size < 0) {
539 			fprintf(stderr, "Invalid value supplied for io_ctx->%.*s: %.*s\n", prev_value->len,
540 				(char *)prev_value->start, value->len, (char *)value->start);
541 			return false;
542 		}
543 		value += nested_object_size;
544 		i += nested_object_size;
545 	}
546 	return true;
547 }
548 
549 static bool
parse_vhost_scsi_cmds(void * item,struct spdk_json_val * value,size_t num_values)550 parse_vhost_scsi_cmds(void *item, struct spdk_json_val *value, size_t num_values)
551 {
552 	struct fuzz_vhost_io_ctx *io_ctx = item;
553 	struct spdk_json_val *prev_value;
554 	int nested_object_size;
555 	uint64_t tmp_val;
556 	size_t i = 0;
557 
558 	while (i < num_values) {
559 		nested_object_size = 1;
560 		if (value->type == SPDK_JSON_VAL_NAME) {
561 			prev_value = value;
562 			value++;
563 			i++;
564 			if (!strncmp(prev_value->start, "req_iov", prev_value->len)) {
565 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_req, value);
566 			} else if (!strncmp(prev_value->start, "data_iov", prev_value->len)) {
567 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_data, value);
568 			} else if (!strncmp(prev_value->start, "resp_iov", prev_value->len)) {
569 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_data, value);
570 			} else if (!strncmp(prev_value->start, "lun", prev_value->len)) {
571 				if (fuzz_get_base_64_buffer_value(&io_ctx->req.scsi_req.lun,
572 								  sizeof(io_ctx->req.scsi_req.lun),
573 								  (char *)value->start,
574 								  value->len)) {
575 					nested_object_size = -1;
576 				}
577 			} else if (!strncmp(prev_value->start, "tag", prev_value->len)) {
578 				if (fuzz_parse_json_num(value, UINT64_MAX, &tmp_val)) {
579 					nested_object_size = -1;
580 				} else {
581 					io_ctx->req.scsi_req.tag = tmp_val;
582 				}
583 			} else if (!strncmp(prev_value->start, "task_attr", prev_value->len)) {
584 				if (fuzz_parse_json_num(value, UINT8_MAX, &tmp_val)) {
585 					nested_object_size = -1;
586 				} else {
587 					io_ctx->req.scsi_req.task_attr = tmp_val;
588 				}
589 			} else if (!strncmp(prev_value->start, "prio", prev_value->len)) {
590 				if (fuzz_parse_json_num(value, UINT8_MAX, &tmp_val)) {
591 					nested_object_size = -1;
592 				} else {
593 					io_ctx->req.scsi_req.prio = tmp_val;
594 				}
595 			} else if (!strncmp(prev_value->start, "crn", prev_value->len)) {
596 				if (fuzz_parse_json_num(value, UINT8_MAX, &tmp_val)) {
597 					nested_object_size = -1;
598 				} else {
599 					io_ctx->req.scsi_req.crn = tmp_val;
600 				}
601 			} else if (!strncmp(prev_value->start, "cdb", prev_value->len)) {
602 				if (fuzz_get_base_64_buffer_value(&io_ctx->req.scsi_req.cdb,
603 								  sizeof(io_ctx->req.scsi_req.cdb),
604 								  (char *)value->start,
605 								  value->len)) {
606 					nested_object_size = -1;
607 				}
608 			}
609 		}
610 		if (nested_object_size < 0) {
611 			fprintf(stderr, "Invalid value supplied for io_ctx->%.*s: %.*s\n", prev_value->len,
612 				(char *)prev_value->start, value->len, (char *)value->start);
613 			return false;
614 		}
615 		value += nested_object_size;
616 		i += nested_object_size;
617 	}
618 	return true;
619 
620 }
621 
622 static bool
parse_vhost_scsi_mgmt_cmds(void * item,struct spdk_json_val * value,size_t num_values)623 parse_vhost_scsi_mgmt_cmds(void *item, struct spdk_json_val *value, size_t num_values)
624 {
625 	struct fuzz_vhost_io_ctx *io_ctx = item;
626 	struct spdk_json_val *prev_value;
627 	int nested_object_size;
628 	uint64_t tmp_val;
629 	size_t i = 0;
630 
631 	while (i < num_values) {
632 		nested_object_size = 1;
633 		if (value->type == SPDK_JSON_VAL_NAME) {
634 			prev_value = value;
635 			value++;
636 			i++;
637 			if (!strncmp(prev_value->start, "req_iov", prev_value->len)) {
638 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_req, value);
639 			} else if (!strncmp(prev_value->start, "data_iov", prev_value->len)) {
640 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_data, value);
641 			} else if (!strncmp(prev_value->start, "resp_iov", prev_value->len)) {
642 				nested_object_size = parse_iov_struct(&io_ctx->iovs.iov_data, value);
643 			} else if (!strncmp(prev_value->start, "type", prev_value->len)) {
644 				if (fuzz_parse_json_num(value, UINT32_MAX, &tmp_val)) {
645 					nested_object_size = -1;
646 				} else {
647 					io_ctx->req.scsi_tmf_req.type = tmp_val;
648 				}
649 			} else if (!strncmp(prev_value->start, "subtype", prev_value->len)) {
650 				if (fuzz_parse_json_num(value, UINT32_MAX, &tmp_val)) {
651 					nested_object_size = -1;
652 				} else {
653 					io_ctx->req.scsi_tmf_req.subtype = tmp_val;
654 				}
655 			}  else if (!strncmp(prev_value->start, "lun", prev_value->len)) {
656 				if (fuzz_get_base_64_buffer_value(&io_ctx->req.scsi_tmf_req.lun,
657 								  sizeof(io_ctx->req.scsi_tmf_req.lun),
658 								  (char *)value->start,
659 								  value->len)) {
660 					nested_object_size = -1;
661 				}
662 			} else if (!strncmp(prev_value->start, "tag", prev_value->len)) {
663 				if (fuzz_parse_json_num(value, UINT64_MAX, &tmp_val)) {
664 					nested_object_size = -1;
665 				} else {
666 					io_ctx->req.scsi_tmf_req.tag = tmp_val;
667 				}
668 			}
669 		}
670 		if (nested_object_size < 0) {
671 			fprintf(stderr, "Invalid value supplied for io_ctx->%.*s: %.*s\n", prev_value->len,
672 				(char *)prev_value->start, value->len, (char *)value->start);
673 			return false;
674 		}
675 		value += nested_object_size;
676 		i += nested_object_size;
677 	}
678 	return true;
679 }
680 /* data parsing functions end */
681 
682 /* build requests begin */
683 static void
craft_io_from_array(struct fuzz_vhost_io_ctx * src_ctx,struct fuzz_vhost_io_ctx * dest_ctx)684 craft_io_from_array(struct fuzz_vhost_io_ctx *src_ctx, struct fuzz_vhost_io_ctx *dest_ctx)
685 {
686 	if (g_keep_iov_pointers) {
687 		dest_ctx->iovs = src_ctx->iovs;
688 	}
689 	dest_ctx->req = src_ctx->req;
690 }
691 
692 static void
craft_virtio_scsi_req(struct fuzz_vhost_dev_ctx * dev_ctx,struct fuzz_vhost_io_ctx * io_ctx)693 craft_virtio_scsi_req(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
694 {
695 	io_ctx->iovs.iov_req.iov_len = sizeof(io_ctx->req.scsi_req);
696 	io_ctx->iovs.iov_resp.iov_len = sizeof(io_ctx->resp.scsi_resp);
697 	fuzz_fill_random_bytes((char *)&io_ctx->req.scsi_req, sizeof(io_ctx->req.scsi_req),
698 			       &dev_ctx->random_seed);
699 	/* TODO: set up the logic to find all luns on the target. Right now we are just assuming the first is OK. */
700 	if (dev_ctx->valid_lun) {
701 		io_ctx->req.scsi_req.lun[0] = 1;
702 		io_ctx->req.scsi_req.lun[1] = 0;
703 	}
704 }
705 
/* Fill an I/O context with a randomized SCSI task-management request,
 * biasing the opcode toward values the target actually dispatches on. */
static void
craft_virtio_scsi_tmf_req(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
{
	io_ctx->iovs.iov_req.iov_len = sizeof(io_ctx->req.scsi_tmf_req);
	io_ctx->iovs.iov_resp.iov_len = sizeof(io_ctx->resp.scsi_tmf_resp);
	fuzz_fill_random_bytes((char *)&io_ctx->req.scsi_tmf_req, sizeof(io_ctx->req.scsi_tmf_req),
			       &dev_ctx->random_seed);
	/* TODO: set up the logic to find all luns on the target. Right now we are just assuming the first is OK. */
	if (dev_ctx->valid_lun) {
		io_ctx->req.scsi_tmf_req.lun[0] = 1;
		io_ctx->req.scsi_tmf_req.lun[1] = 0;
	}

	/* Valid controlq commands have to be of type 0, 1, or 2. Any others just return immediately from the target. */
	/* Try to only test the opcodes that will exercise extra paths in the target side. But allow for at least one invalid value. */
	io_ctx->req.scsi_tmf_req.type = rand() % 4;
}
723 
724 static void
craft_virtio_blk_req(struct fuzz_vhost_io_ctx * io_ctx)725 craft_virtio_blk_req(struct fuzz_vhost_io_ctx *io_ctx)
726 {
727 	io_ctx->iovs.iov_req.iov_len = sizeof(io_ctx->req.blk_req);
728 	io_ctx->iovs.iov_resp.iov_len = sizeof(io_ctx->resp.blk_resp);
729 	io_ctx->req.blk_req.type = rand();
730 	io_ctx->req.blk_req.sector = rand();
731 }
732 
/*
 * Build one request/response pair in io_ctx: wire up the iovecs, then either
 * replay a command from the parsed JSON arrays or craft a random one.
 */
static void
craft_virtio_req_rsp_pair(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
{
	struct fuzz_vhost_iov_ctx *iovs = &io_ctx->iovs;

	/*
	 * Always set these buffer values up front.
	 * If the user wants to override this with the json values,
	 * they can specify -k when starting the app. */
	iovs->iov_req.iov_base = &io_ctx->req;
	if (dev_ctx->use_bogus_buffer) {
		/* Random length paired with a pointer outside our hugepage memory. */
		iovs->iov_data.iov_len = rand();
		iovs->iov_data.iov_base = get_invalid_mem_address(iovs->iov_data.iov_len);
	} else if (dev_ctx->use_valid_buffer) {
		iovs->iov_data.iov_len = 1024;
		iovs->iov_data.iov_base = g_valid_buffer;
	}
	iovs->iov_resp.iov_base = &io_ctx->resp;

	/* Replay mode: submitted_io indexes the parsed command array; the
	 * poller caps submissions at the array size (see get_max_num_io). */
	if (dev_ctx->socket_is_blk && g_blk_cmd_array) {
		craft_io_from_array(&g_blk_cmd_array[dev_ctx->submitted_io], io_ctx);
		return;
	} else if (dev_ctx->test_scsi_tmf && g_scsi_mgmt_cmd_array) {
		craft_io_from_array(&g_scsi_mgmt_cmd_array[dev_ctx->submitted_io], io_ctx);
		return;
	} else if (g_scsi_cmd_array) {
		craft_io_from_array(&g_scsi_cmd_array[dev_ctx->submitted_io], io_ctx);
		return;
	}

	/* Random mode: craft a request matching the device's command flavor. */
	if (dev_ctx->socket_is_blk) {
		craft_virtio_blk_req(io_ctx);
	} else if (dev_ctx->test_scsi_tmf) {
		craft_virtio_scsi_tmf_req(dev_ctx, io_ctx);
	} else {
		craft_virtio_scsi_req(dev_ctx, io_ctx);
	}
}
771 /* build requests end */
772 
773 /* submit requests begin */
774 static uint64_t
get_max_num_io(struct fuzz_vhost_dev_ctx * dev_ctx)775 get_max_num_io(struct fuzz_vhost_dev_ctx *dev_ctx)
776 {
777 	if (dev_ctx->socket_is_blk) {
778 		return g_blk_cmd_array_size;
779 	} else if (dev_ctx->test_scsi_tmf) {
780 		return g_scsi_mgmt_cmd_array_size;
781 	} else {
782 		return g_scsi_cmd_array_size;
783 	}
784 }
785 
/*
 * Enqueue one request/response pair (plus the optional data buffer) onto the
 * virtqueue. Returns 0 on success or the virtqueue_req_start() error:
 * -ENOMEM when descriptors are temporarily exhausted, -EINVAL if the queue
 * cannot even hold our descriptors (treated as broken by the caller).
 */
static int
submit_virtio_req_rsp_pair(struct fuzz_vhost_dev_ctx *dev_ctx, struct virtqueue *vq,
			   struct fuzz_vhost_io_ctx *io_ctx)
{
	struct fuzz_vhost_iov_ctx *iovs = &io_ctx->iovs;
	int num_iovs = 2, rc;

	/* A data descriptor is only attached in bogus- or valid-buffer mode. */
	num_iovs += dev_ctx->use_bogus_buffer || dev_ctx->use_valid_buffer ? 1 : 0;

	rc = virtqueue_req_start(vq, io_ctx, num_iovs);
	if (rc) {
		return rc;
	}
	virtqueue_req_add_iovs(vq, &iovs->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	/* blk and scsi requests favor different orders for the iov objects. */
	if (dev_ctx->socket_is_blk) {
		if (dev_ctx->use_bogus_buffer || dev_ctx->use_valid_buffer) {
			virtqueue_req_add_iovs(vq, &iovs->iov_data, 1, SPDK_VIRTIO_DESC_WR);
		}
		virtqueue_req_add_iovs(vq, &iovs->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
	} else {
		virtqueue_req_add_iovs(vq, &iovs->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
		if (dev_ctx->use_bogus_buffer || dev_ctx->use_valid_buffer) {
			virtqueue_req_add_iovs(vq, &iovs->iov_data, 1, SPDK_VIRTIO_DESC_WR);
		}
	}
	virtqueue_req_flush(vq);
	return 0;
}
815 
/*
 * Keep submitting fuzzed requests until the free context list, the
 * virtqueue, or the max_io_to_submit cap is exhausted.
 */
static void
dev_submit_requests(struct fuzz_vhost_dev_ctx *dev_ctx, struct virtqueue *vq,
		    uint64_t max_io_to_submit)
{
	struct fuzz_vhost_io_ctx *io_ctx;
	int rc;

	while (!TAILQ_EMPTY(&dev_ctx->free_io_ctx) && dev_ctx->submitted_io < max_io_to_submit) {
		io_ctx = TAILQ_FIRST(&dev_ctx->free_io_ctx);
		craft_virtio_req_rsp_pair(dev_ctx, io_ctx);
		rc = submit_virtio_req_rsp_pair(dev_ctx, vq, io_ctx);
		if (rc == 0) {
			/* Submitted: move the context to the outstanding list. */
			TAILQ_REMOVE(&dev_ctx->free_io_ctx, io_ctx, link);
			TAILQ_INSERT_TAIL(&dev_ctx->outstanding_io_ctx, io_ctx, link);
			dev_ctx->submitted_io++;
		} else if (rc == -ENOMEM) {
			/* There are just not enough available buffers right now. try later. */
			return;
		} else if (rc == -EINVAL) {
			/* The virtqueue must be broken. We know we can fit at least three descriptors */
			fprintf(stderr, "One of the virtqueues for dev %p is broken. stopping all devices.\n", dev_ctx);
			g_run = 0;
		}
	}
}
841 /* submit requests end */
842 
843 /* complete requests begin */
/*
 * Inspect the response the target wrote for a completed I/O. Since the
 * fuzzer submits garbage, a zero (success) status is unexpected and gets
 * logged together with the full request for later investigation.
 */
static void
check_successful_op(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
{
	bool is_successful = false;

	if (dev_ctx->socket_is_blk) {
		/* virtio-blk single status byte; 0 means OK. */
		if (io_ctx->resp.blk_resp == 0) {
			is_successful = true;
		}
	} else if (dev_ctx->test_scsi_tmf) {
		/* The TMF and async-notification responses overlap in a union;
		 * require both overlapping response codes to read 0. */
		if (io_ctx->resp.scsi_tmf_resp.scsi_tmf_resp.response == 0 &&
		    io_ctx->resp.scsi_tmf_resp.an_resp.response == 0) {
			is_successful = true;
		}
	} else {
		if (io_ctx->resp.scsi_resp.status == 0) {
			is_successful = true;
		}
	}

	if (is_successful) {
		fprintf(stderr, "An I/O completed without an error status. This could be worth looking into.\n");
		fprintf(stderr,
			"There is also a good chance that the target just failed before setting a status.\n");
		dev_ctx->successful_io++;
		print_req_obj(dev_ctx, io_ctx);
	} else if (g_verbose_mode) {
		fprintf(stderr, "The following I/O failed as expected.\n");
		print_req_obj(dev_ctx, io_ctx);
	}
}
875 
/* Return a completed I/O context to the free list, record its outcome,
 * and push the per-device timeout deadline forward. */
static void
complete_io(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_io_ctx *io_ctx)
{
	TAILQ_REMOVE(&dev_ctx->outstanding_io_ctx, io_ctx, link);
	TAILQ_INSERT_HEAD(&dev_ctx->free_io_ctx, io_ctx, link);
	check_successful_op(dev_ctx, io_ctx);
	dev_ctx->completed_io++;
	/* Any completion counts as forward progress, so reset the timeout. */
	dev_ctx->timeout_tsc = fuzz_refresh_timeout();
}
885 
/* Per-device poller: reap completions from the virtqueue, detect timeout and
 * end-of-run conditions, and submit a fresh batch of fuzzed requests.
 * Registered by start_io() on the device's dedicated thread; unregisters
 * itself (and exits the thread) once the run is over and all outstanding
 * I/O has drained. Always returns 0 (poller idle/busy status is unused). */
static int
poll_dev(void *ctx)
{
	struct fuzz_vhost_dev_ctx *dev_ctx = ctx;
	struct virtqueue *vq;
	struct fuzz_vhost_io_ctx *io_ctx[FUZZ_QUEUE_DEPTH];
	int num_active_threads;
	uint64_t max_io_to_complete = UINT64_MAX;
	uint64_t current_ticks;
	uint32_t len[FUZZ_QUEUE_DEPTH];
	uint16_t num_cpl, i;

	/* In replay mode (-j), stop after every supplied command has run once. */
	if (g_json_file) {
		max_io_to_complete = get_max_num_io(dev_ctx);
	}

	/* SCSI TMF commands go through the control queue; everything else
	 * (blk and regular scsi commands) uses the request queue. */
	if (!dev_ctx->socket_is_blk && dev_ctx->test_scsi_tmf) {
		vq = dev_ctx->virtio_dev.vqs[VIRTIO_SCSI_CONTROLQ];
	} else {
		vq = dev_ctx->virtio_dev.vqs[VIRTIO_REQUESTQ];
	}

	num_cpl = virtio_recv_pkts(vq, (void **)io_ctx, len, FUZZ_QUEUE_DEPTH);

	for (i = 0; i < num_cpl; i++) {
		complete_io(dev_ctx, io_ctx[i]);
	}

	current_ticks = spdk_get_ticks();

	/* timeout_tsc is refreshed on every completion; tripping it means the
	 * target stopped responding, so dump what was in flight and stop. */
	if (current_ticks > dev_ctx->timeout_tsc) {
		dev_ctx->timed_out = true;
		g_run = false;
		fprintf(stderr, "The VQ on device %p timed out. Dumping contents now.\n", dev_ctx);
		dump_outstanding_io(dev_ctx);
	}

	/* Overall test runtime (-t) expired. */
	if (current_ticks > g_runtime_ticks) {
		g_run = 0;
	}

	if (!g_run || dev_ctx->completed_io >= max_io_to_complete) {
		/* Keep polling until every outstanding I/O has drained, then
		 * tear down this device's thread. The last thread out clears
		 * g_run so end_fuzz() can stop the app. */
		if (TAILQ_EMPTY(&dev_ctx->outstanding_io_ctx)) {
			spdk_poller_unregister(&dev_ctx->poller);
			num_active_threads = __sync_sub_and_fetch(&g_num_active_threads, 1);
			if (num_active_threads == 0) {
				g_run = 0;
			}
			spdk_thread_exit(dev_ctx->thread);
		}
		return 0;
	}

	dev_submit_requests(dev_ctx, vq, max_io_to_complete);
	return 0;
}
942 /* complete requests end */
943 
944 static void
start_io(void * ctx)945 start_io(void *ctx)
946 {
947 	struct fuzz_vhost_dev_ctx *dev_ctx = ctx;
948 
949 	if (g_random_seed) {
950 		dev_ctx->random_seed = g_random_seed;
951 	} else {
952 		dev_ctx->random_seed = spdk_get_ticks();
953 	}
954 
955 	dev_ctx->timeout_tsc = fuzz_refresh_timeout();
956 
957 	dev_ctx->poller = SPDK_POLLER_REGISTER(poll_dev, dev_ctx, 0);
958 	if (dev_ctx->poller == NULL) {
959 		return;
960 	}
961 
962 }
963 
964 static int
end_fuzz(void * ctx)965 end_fuzz(void *ctx)
966 {
967 	if (!g_run && !g_num_active_threads) {
968 		spdk_poller_unregister(&g_run_poller);
969 		cleanup();
970 		spdk_app_stop(0);
971 	}
972 	return 0;
973 }
974 
975 static void
begin_fuzz(void * ctx)976 begin_fuzz(void *ctx)
977 {
978 	struct fuzz_vhost_dev_ctx *dev_ctx;
979 
980 	g_runtime_ticks = spdk_get_ticks() + spdk_get_ticks_hz() * g_runtime;
981 
982 	g_valid_buffer = spdk_malloc(0x1000, 0x200, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_SHARE);
983 	if (g_valid_buffer == NULL) {
984 		fprintf(stderr, "Failed to allocate a valid buffer for I/O\n");
985 		goto out;
986 	}
987 
988 	g_run_poller = SPDK_POLLER_REGISTER(end_fuzz, NULL, 0);
989 	if (g_run_poller == NULL) {
990 		fprintf(stderr, "Failed to register a poller for test completion checking.\n");
991 	}
992 
993 	TAILQ_FOREACH(dev_ctx, &g_dev_list, link) {
994 		assert(dev_ctx->thread != NULL);
995 		spdk_thread_send_msg(dev_ctx->thread, start_io, dev_ctx);
996 		__sync_add_and_fetch(&g_num_active_threads, 1);
997 	}
998 
999 	return;
1000 out:
1001 	cleanup();
1002 	spdk_app_stop(0);
1003 }
1004 
/* Print the usage text for the app-specific command line options. */
static void
fuzz_vhost_usage(void)
{
	static const char *const usage_lines[] = {
		" -j <path>                 Path to a json file containing named objects.\n",
		" -k                        Keep the iov pointer addresses from the json file. only valid with -j.\n",
		" -S <integer>              Seed value for test.\n",
		" -t <integer>              Time in seconds to run the fuzz test.\n",
		" -V                        Enable logging of each submitted command.\n",
	};
	size_t i;

	for (i = 0; i < sizeof(usage_lines) / sizeof(usage_lines[0]); i++) {
		fputs(usage_lines[i], stderr);
	}
}
1015 
1016 static int
fuzz_vhost_parse(int ch,char * arg)1017 fuzz_vhost_parse(int ch, char *arg)
1018 {
1019 	int64_t error_test;
1020 
1021 	switch (ch) {
1022 	case 'j':
1023 		g_json_file = optarg;
1024 		break;
1025 	case 'k':
1026 		g_keep_iov_pointers = true;
1027 		break;
1028 	case 'S':
1029 		error_test = spdk_strtol(arg, 10);
1030 		if (error_test < 0) {
1031 			fprintf(stderr, "Invalid value supplied for the random seed.\n");
1032 			return -1;
1033 		} else {
1034 			g_random_seed = spdk_strtol(arg, 10);
1035 		}
1036 		break;
1037 	case 't':
1038 		g_runtime = spdk_strtol(arg, 10);
1039 		if (g_runtime < 0 || g_runtime > MAX_RUNTIME_S) {
1040 			fprintf(stderr, "You must supply a positive runtime value less than 86401.\n");
1041 			return -1;
1042 		}
1043 		break;
1044 	case 'V':
1045 		g_verbose_mode = true;
1046 		break;
1047 	case '?':
1048 	default:
1049 		return -EINVAL;
1050 	}
1051 	return 0;
1052 }
1053 
1054 int
main(int argc,char ** argv)1055 main(int argc, char **argv)
1056 {
1057 	struct spdk_app_opts opts = {};
1058 	int rc;
1059 
1060 	spdk_app_opts_init(&opts, sizeof(opts));
1061 	opts.name = "vhost_fuzz";
1062 	opts.rpc_addr = NULL;
1063 	g_runtime = DEFAULT_RUNTIME;
1064 
1065 	rc = spdk_app_parse_args(argc, argv, &opts, "j:kS:t:V", NULL, fuzz_vhost_parse, fuzz_vhost_usage);
1066 	if (rc != SPDK_APP_PARSE_ARGS_SUCCESS) {
1067 		fprintf(stderr, "Unable to parse the application arguments.\n");
1068 		return -1;
1069 	}
1070 
1071 	if (g_json_file != NULL) {
1072 		g_blk_cmd_array_size = fuzz_parse_args_into_array(g_json_file,
1073 				       (void **)&g_blk_cmd_array,
1074 				       sizeof(struct fuzz_vhost_io_ctx),
1075 				       BLK_IO_NAME, parse_vhost_blk_cmds);
1076 		g_scsi_cmd_array_size = fuzz_parse_args_into_array(g_json_file,
1077 					(void **)&g_scsi_cmd_array,
1078 					sizeof(struct fuzz_vhost_io_ctx),
1079 					SCSI_IO_NAME, parse_vhost_scsi_cmds);
1080 		g_scsi_mgmt_cmd_array_size = fuzz_parse_args_into_array(g_json_file,
1081 					     (void **)&g_scsi_mgmt_cmd_array,
1082 					     sizeof(struct fuzz_vhost_io_ctx),
1083 					     SCSI_IO_NAME, parse_vhost_scsi_mgmt_cmds);
1084 		if (g_blk_cmd_array_size == 0 && g_scsi_cmd_array_size == 0 && g_scsi_mgmt_cmd_array_size == 0) {
1085 			fprintf(stderr, "The provided json file did not contain any valid commands. Exiting.\n");
1086 			return -EINVAL;
1087 		}
1088 	}
1089 
1090 	rc = spdk_app_start(&opts, begin_fuzz, NULL);
1091 
1092 	spdk_app_fini();
1093 	return rc;
1094 }
1095