/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation. All rights reserved.
 */
#include "spdk/stdinc.h"
#include "spdk/conf.h"
#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/nvme_spec.h"
#include "spdk/nvme.h"
#include "spdk/likely.h"
#include "spdk/file.h"

#include "spdk/vfio_user_pci.h"
#include <linux/vfio.h>
#include "spdk/vfio_user_spec.h"
#include "spdk/config.h"

#ifdef SPDK_CONFIG_ASAN
#include <sanitizer/lsan_interface.h>
#endif

#define VFIO_MAXIMUM_SPARSE_MMAP_REGIONS 8
#define VFIO_USER_GET_REGION_INFO_LEN 4096

typedef int (*fuzzer_fn)(const uint8_t *data, size_t size, struct vfio_device *dev);

struct fuzz_type {
	fuzzer_fn fn;
	uint32_t bytes_per_cmd;
};

#define VFIO_USER_MAX_PAYLOAD_SIZE (4096)
static uint8_t payload[VFIO_USER_MAX_PAYLOAD_SIZE];

static char *g_ctrlr_path;
static char *g_artifact_prefix;
static int32_t g_time_in_sec = 10;
static char *g_corpus_dir;
static uint8_t *g_repro_data;
static size_t g_repro_size;
static pthread_t g_fuzz_td;
static pthread_t g_reactor_td;
static struct fuzz_type *g_fuzzer;

enum IO_POLLER_STATE {
	IO_POLLER_STATE_IDLE,
	IO_POLLER_STATE_PROCESSING,
	IO_POLLER_STATE_TERMINATE_INIT,
	IO_POLLER_STATE_TERMINATE_WAIT,
	IO_POLLER_STATE_TERMINATE_DONE,
};

struct io_thread {
	enum IO_POLLER_STATE state;
	int lba_num;
	char *write_buf;
	char *read_buf;
	size_t buf_size;
	struct spdk_poller *run_poller;
	struct spdk_thread *thread;
	struct spdk_nvme_ctrlr *io_ctrlr;
	pthread_t io_td;
	pthread_t term_td;
	struct spdk_nvme_ns *io_ns;
	struct spdk_nvme_qpair *io_qpair;
	char *io_ctrlr_path;
} g_io_thread;

static int
fuzz_vfio_user_version(const uint8_t *data, size_t size, struct vfio_device *dev)
{
	struct vfio_user_version *version = (struct vfio_user_version *)payload;

	version->major = ((uint16_t)data[0] << 8) + (uint16_t)data[1];
	version->minor = ((uint16_t)data[2] << 8) + (uint16_t)data[3];

	return spdk_vfio_user_dev_send_request(dev, VFIO_USER_VERSION, payload,
					       sizeof(struct vfio_user_version),
					       sizeof(payload), NULL, 0);
}

static int
fuzz_vfio_user_region_rw(const uint8_t *data, size_t size, struct vfio_device *dev)
{
	uint8_t buf[4];
	uint64_t offset = 0;

	offset = ((uint64_t)data[0] << 8) + (uint64_t)data[1];
	offset = (SPDK_ALIGN_FLOOR(offset, 4)) % 4096;
	memcpy(buf, &data[2], sizeof(buf));

	/* A write to BAR0 may fail depending on which register it hits, so its return
	 * value is not checked. */
	spdk_vfio_user_pci_bar_access(dev, VFIO_PCI_BAR0_REGION_INDEX, offset, sizeof(buf),
				      &buf, true);
	return spdk_vfio_user_pci_bar_access(dev, VFIO_PCI_BAR0_REGION_INDEX, offset, sizeof(buf),
					     &buf, false);
}

static int
fuzz_vfio_user_get_region_info(const uint8_t *data, size_t size, struct vfio_device *dev)
{
	int ret = 0;
	int fds[VFIO_MAXIMUM_SPARSE_MMAP_REGIONS];
	uint8_t buf[VFIO_USER_GET_REGION_INFO_LEN];
	struct vfio_region_info *info = (struct vfio_region_info *)buf;

	memcpy(&info->index, &data[0], 4);
	memcpy(&info->argsz, &data[4], 4);

	ret = spdk_vfio_user_dev_send_request(dev, VFIO_USER_DEVICE_GET_REGION_INFO,
					      info, info->argsz, VFIO_USER_GET_REGION_INFO_LEN, fds,
					      VFIO_MAXIMUM_SPARSE_MMAP_REGIONS);
	return ret;
}

/* Since both ends of the connection are in the same process,
 * picking completely random addresses is actually fine, since
 * we won't actually be mapping anything.
 */
static int
fuzz_vfio_user_dma_map(const uint8_t *data, size_t size, struct vfio_device *dev)
{
	struct vfio_user_dma_map dma_map = { 0 };
	int fd;

	memcpy(&fd, &data[0], 4);
	dma_map.argsz = sizeof(struct vfio_user_dma_map);

	memcpy(&dma_map.addr, &data[8], 8);
	memcpy(&dma_map.size, &data[16], 8);
	memcpy(&dma_map.offset, &data[24], 8);

	dma_map.flags = VFIO_USER_F_DMA_REGION_READ | VFIO_USER_F_DMA_REGION_WRITE;

	spdk_vfio_user_dev_send_request(dev, VFIO_USER_DMA_MAP,
					&dma_map, sizeof(dma_map), sizeof(dma_map), &fd, 1);
	return 0;
}

static int
fuzz_vfio_user_dma_unmap(const uint8_t *data, size_t size, struct vfio_device *dev)
{
	struct vfio_user_dma_unmap dma_unmap = { 0 };
	struct vfio_user_dma_map dma_map = { 0 };
	int fd;

	memcpy(&fd, &data[0], 4);
	dma_map.argsz = sizeof(struct vfio_user_dma_map);

	memcpy(&dma_map.addr, &data[8], 8);
	memcpy(&dma_map.size, &data[16], 8);
	memcpy(&dma_map.offset, &data[24], 8);

	dma_map.flags = VFIO_USER_F_DMA_REGION_READ | VFIO_USER_F_DMA_REGION_WRITE;

	dma_unmap.argsz = sizeof(struct vfio_user_dma_unmap);
	dma_unmap.addr = dma_map.addr;
	dma_unmap.size = dma_map.size;

	spdk_vfio_user_dev_send_request(dev, VFIO_USER_DMA_MAP,
					&dma_map, sizeof(dma_map), sizeof(dma_map), &fd, 1);
	/* Don't check the return value, so that unmapping a region that was never
	 * successfully mapped is exercised as well. */
	spdk_vfio_user_dev_send_request(dev, VFIO_USER_DMA_UNMAP,
					&dma_unmap, sizeof(dma_unmap), sizeof(dma_unmap), &fd, 1);
	return 0;
}

static int
fuzz_vfio_user_irq_set(const uint8_t *data, size_t size, struct vfio_device *dev)
{
	uint8_t buf[VFIO_USER_GET_REGION_INFO_LEN];
	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)buf;

	irq_set->argsz = sizeof(struct vfio_irq_set);
	memcpy(&irq_set->flags, &data[0], 4);
	/* The maximum index is VFIO_PCI_NUM_IRQS, so there is no need to fuzz the full
	 * uint32_t range. */
	irq_set->index = data[4];
	memcpy(&irq_set->start, &data[5], 4);
	memcpy(&irq_set->count, &data[9], 4);

	spdk_vfio_user_dev_send_request(dev, VFIO_USER_DEVICE_SET_IRQS,
					irq_set, irq_set->argsz,
					VFIO_USER_GET_REGION_INFO_LEN, NULL, 0);
	return 0;
}

static int
fuzz_vfio_user_set_msix(const uint8_t *data, size_t size, struct vfio_device *dev)
{
	struct vfio_irq_set irq_set;

	irq_set.argsz = sizeof(struct vfio_irq_set);
	/* VFIO_IRQ_SET_ACTION_TRIGGER is the highest defined flag, so masking to the low
	 * six bits still exercises the other flag combinations. */
	irq_set.flags = data[0] & ((1 << 6) - 1);
	irq_set.index = VFIO_PCI_MSIX_IRQ_INDEX;
	memcpy(&irq_set.start, &data[1], 4);
	memcpy(&irq_set.count, &data[5], 4);

	spdk_vfio_user_dev_send_request(dev, VFIO_USER_DEVICE_SET_IRQS,
					&irq_set, sizeof(irq_set), sizeof(irq_set), NULL, 0);
	return 0;
}

static struct fuzz_type g_fuzzers[] = {
	{ .fn = fuzz_vfio_user_region_rw, .bytes_per_cmd = 6},
	{ .fn = fuzz_vfio_user_version, .bytes_per_cmd = 4},
	{ .fn = fuzz_vfio_user_get_region_info, .bytes_per_cmd = 8},
	{ .fn = fuzz_vfio_user_dma_map, .bytes_per_cmd = 32},
	{ .fn = fuzz_vfio_user_dma_unmap, .bytes_per_cmd = 32},
	{ .fn = fuzz_vfio_user_irq_set, .bytes_per_cmd = 13},
	{ .fn = fuzz_vfio_user_set_msix, .bytes_per_cmd = 9},
	{ .fn = NULL, .bytes_per_cmd = 0}
};
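
/*
 * Each entry's bytes_per_cmd is the number of fuzz-input bytes its handler consumes;
 * TestOneInput() rejects shorter inputs and start_fuzzer() passes the same value to
 * libFuzzer as -max_len. The table is NULL-terminated, so NUM_FUZZERS below excludes
 * the terminating entry.
 */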

#define NUM_FUZZERS (SPDK_COUNTOF(g_fuzzers) - 1)

static int
TestOneInput(const uint8_t *data, size_t size)
{
	struct vfio_device *dev = NULL;
	char ctrlr_path[PATH_MAX];
	int ret = 0;

	/* Reject any input of insufficient length */
	if (size < g_fuzzer->bytes_per_cmd) {
		return -1;
	}

	snprintf(ctrlr_path, sizeof(ctrlr_path), "%s/cntrl", g_ctrlr_path);
	ret = access(ctrlr_path, F_OK);
	if (ret != 0) {
		fprintf(stderr, "Access path %s failed\n", ctrlr_path);
		spdk_app_start_shutdown();
		return -1;
	}

	dev = spdk_vfio_user_setup(ctrlr_path);
	if (dev == NULL) {
		fprintf(stderr, "spdk_vfio_user_setup() failed for controller path '%s'\n",
			ctrlr_path);
		spdk_app_start_shutdown();
		return -1;
	}

	/* run cmds here */
	if (g_fuzzer->fn != NULL) {
		g_fuzzer->fn(data, size, dev);
	}

	spdk_vfio_user_release(dev);
	return 0;
}

int LLVMFuzzerRunDriver(int *argc, char ***argv, int (*UserCb)(const uint8_t *Data, size_t Size));

static void
io_terminate(void *ctx)
{
	((struct io_thread *)ctx)->state = IO_POLLER_STATE_TERMINATE_INIT;
}

static void
exit_handler(void)
{
	if (g_io_thread.io_ctrlr_path && g_io_thread.thread) {
		spdk_thread_send_msg(g_io_thread.thread, io_terminate, &g_io_thread);
	} else if (spdk_thread_get_app_thread()) {
		spdk_app_stop(0);
	}

	pthread_join(g_reactor_td, NULL);
}

static void *
start_fuzzer(void *ctx)
{
	char *_argv[] = {
		"spdk",
		"-len_control=0",
		"-detect_leaks=1",
		NULL,
		NULL,
		NULL,
		NULL
	};
	char time_str[128];
	char prefix[PATH_MAX];
	char len_str[128];
	char **argv = _argv;
	int argc = SPDK_COUNTOF(_argv);

	spdk_unaffinitize_thread();
	snprintf(prefix, sizeof(prefix), "-artifact_prefix=%s", g_artifact_prefix);
	argv[argc - 4] = prefix;
	snprintf(len_str, sizeof(len_str), "-max_len=%d", g_fuzzer->bytes_per_cmd);
	argv[argc - 3] = len_str;
	snprintf(time_str, sizeof(time_str), "-max_total_time=%d", g_time_in_sec);
	argv[argc - 2] = time_str;
	argv[argc - 1] = g_corpus_dir;

	atexit(exit_handler);

	free(g_artifact_prefix);

	if (g_repro_data) {
		printf("Running single test based on reproduction data file.\n");
		TestOneInput(g_repro_data, g_repro_size);
		printf("Done.\n");
	} else {
		LLVMFuzzerRunDriver(&argc, &argv, TestOneInput);
		/* TODO: in the normal case, LLVMFuzzerRunDriver never returns - it calls exit()
		 * directly and we never get here. But this behavior isn't really documented
		 * anywhere by LLVM.
		 */
	}

	return NULL;
}
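
/*
 * I/O verification path (used when -Y is given): io_poller() fills write_buf with
 * pseudo-random data and writes one block at lba_num, write_complete() reads the same
 * block back, and read_complete() compares the buffers and advances lba_num. Any error
 * or data mismatch raises SIGSEGV in the fuzzer thread so that libFuzzer records a crash
 * artifact for the last input.
 */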

static void
read_complete(void *arg, const struct spdk_nvme_cpl *completion)
{
	int sectors_num = 0;
	struct io_thread *io = (struct io_thread *)arg;

	if (spdk_nvme_cpl_is_error(completion)) {
		spdk_nvme_qpair_print_completion(io->io_qpair, (struct spdk_nvme_cpl *)completion);
		fprintf(stderr, "I/O read error status: %s\n",
			spdk_nvme_cpl_get_status_string(&completion->status));
		io->state = IO_POLLER_STATE_TERMINATE_WAIT;
		pthread_kill(g_fuzz_td, SIGSEGV);
		return;
	}

	if (memcmp(io->read_buf, io->write_buf, io->buf_size)) {
		fprintf(stderr, "I/O corruption: read data does not match written data\n");
		io->state = IO_POLLER_STATE_TERMINATE_WAIT;
		pthread_kill(g_fuzz_td, SIGSEGV);
		return;
	}

	sectors_num = spdk_nvme_ns_get_num_sectors(io->io_ns);
	io->lba_num = (io->lba_num + 1) % sectors_num;
	if (io->state != IO_POLLER_STATE_TERMINATE_INIT) {
		io->state = IO_POLLER_STATE_IDLE;
	}
}

static void
write_complete(void *arg, const struct spdk_nvme_cpl *completion)
{
	int rc = 0;
	struct io_thread *io = (struct io_thread *)arg;

	if (spdk_nvme_cpl_is_error(completion)) {
		spdk_nvme_qpair_print_completion(io->io_qpair,
						 (struct spdk_nvme_cpl *)completion);
		fprintf(stderr, "I/O write error status: %s\n",
			spdk_nvme_cpl_get_status_string(&completion->status));
		io->state = IO_POLLER_STATE_TERMINATE_WAIT;
		pthread_kill(g_fuzz_td, SIGSEGV);
		return;
	}

	rc = spdk_nvme_ns_cmd_read(io->io_ns, io->io_qpair,
				   io->read_buf, io->lba_num, 1,
				   read_complete, io, 0);
	if (rc != 0) {
		fprintf(stderr, "starting read I/O failed\n");
		io->state = IO_POLLER_STATE_TERMINATE_WAIT;
		pthread_kill(g_fuzz_td, SIGSEGV);
	}
}

static void *
terminate_io_thread(void *ctx)
{
	struct io_thread *io = (struct io_thread *)ctx;

	spdk_nvme_ctrlr_free_io_qpair(io->io_qpair);
	spdk_nvme_detach(io->io_ctrlr);
	spdk_free(io->write_buf);
	spdk_free(io->read_buf);

	io->state = IO_POLLER_STATE_TERMINATE_DONE;

	return NULL;
}
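
/*
 * io_poller() state machine: IDLE issues a new write of pseudo-random data, PROCESSING
 * polls the qpair for completions, TERMINATE_INIT drains outstanding requests and spawns
 * terminate_io_thread(), TERMINATE_WAIT spins until that thread finishes the teardown,
 * and TERMINATE_DONE unregisters the poller and stops the application.
 */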

static int
io_poller(void *ctx)
{
	int ret = 0;
	struct io_thread *io = (struct io_thread *)ctx;
	size_t i;
	unsigned int seed = 0;
	int *write_buf = (int *)io->write_buf;

	switch (io->state) {
	case IO_POLLER_STATE_IDLE:
		break;
	case IO_POLLER_STATE_PROCESSING:
		spdk_nvme_qpair_process_completions(io->io_qpair, 0);
		return SPDK_POLLER_BUSY;
	case IO_POLLER_STATE_TERMINATE_INIT:
		if (spdk_nvme_qpair_get_num_outstanding_reqs(io->io_qpair) > 0) {
			spdk_nvme_qpair_process_completions(io->io_qpair, 0);
			return SPDK_POLLER_BUSY;
		}

		io->state = IO_POLLER_STATE_TERMINATE_WAIT;
		ret = pthread_create(&io->term_td, NULL, terminate_io_thread, ctx);
		if (ret != 0) {
			abort();
		}
		return SPDK_POLLER_BUSY;
	case IO_POLLER_STATE_TERMINATE_WAIT:
		return SPDK_POLLER_BUSY;
	case IO_POLLER_STATE_TERMINATE_DONE:
		spdk_poller_unregister(&io->run_poller);
		spdk_thread_exit(spdk_get_thread());
		spdk_app_stop(0);
		return SPDK_POLLER_IDLE;
	default:
		break;
	}

	io->state = IO_POLLER_STATE_PROCESSING;

	/* The compiler should optimize the "/ sizeof(int)" into a right shift. */
	for (i = 0; i < io->buf_size / sizeof(int); i++) {
		write_buf[i] = rand_r(&seed);
	}

	ret = spdk_nvme_ns_cmd_write(io->io_ns, io->io_qpair,
				     io->write_buf, io->lba_num, 1,
				     write_complete, io, 0);
	if (ret < 0) {
		fprintf(stderr, "starting write I/O failed\n");
		pthread_kill(g_fuzz_td, SIGSEGV);
		return SPDK_POLLER_IDLE;
	}

	return SPDK_POLLER_IDLE;
}

static void
start_io_poller(void *ctx)
{
	struct io_thread *io = (struct io_thread *)ctx;

	io->run_poller = SPDK_POLLER_REGISTER(io_poller, ctx, 0);
	if (io->run_poller == NULL) {
		fprintf(stderr, "Failed to register a poller for IO.\n");
		spdk_app_start_shutdown();
	}
}

static void *
init_io(void *ctx)
{
	struct spdk_nvme_transport_id trid = {};
	int nsid = 0;

	snprintf(trid.traddr, sizeof(trid.traddr), "%s", g_io_thread.io_ctrlr_path);

	trid.trtype = SPDK_NVME_TRANSPORT_VFIOUSER;
	g_io_thread.io_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
	if (g_io_thread.io_ctrlr == NULL) {
		fprintf(stderr, "spdk_nvme_connect() failed for transport address '%s'\n",
			trid.traddr);
		spdk_app_start_shutdown();
		return NULL;
	}

	/* Even when ASan is enabled, LeakSanitizer has problems detecting references stored
	 * in DPDK-managed memory. This causes LSan to report a false leak when 'pqpair->stat'
	 * is allocated on the heap but the only reference to it is stored in the DPDK-managed
	 * 'qpair', making it invisible to LSan. */
#ifdef SPDK_CONFIG_ASAN
	__lsan_disable();
#endif
	g_io_thread.io_qpair = spdk_nvme_ctrlr_alloc_io_qpair(g_io_thread.io_ctrlr, NULL, 0);
#ifdef SPDK_CONFIG_ASAN
	__lsan_enable();
#endif
	if (g_io_thread.io_qpair == NULL) {
		spdk_nvme_detach(g_io_thread.io_ctrlr);
		fprintf(stderr, "spdk_nvme_ctrlr_alloc_io_qpair failed\n");
		spdk_app_start_shutdown();
		return NULL;
	}

	if (spdk_nvme_ctrlr_get_num_ns(g_io_thread.io_ctrlr) == 0) {
		fprintf(stderr, "no namespaces for IO\n");
		spdk_app_start_shutdown();
		return NULL;
	}

	nsid = spdk_nvme_ctrlr_get_first_active_ns(g_io_thread.io_ctrlr);
	g_io_thread.io_ns = spdk_nvme_ctrlr_get_ns(g_io_thread.io_ctrlr, nsid);
	if (!g_io_thread.io_ns) {
		fprintf(stderr, "no io_ns for IO\n");
		spdk_app_start_shutdown();
		return NULL;
	}

	g_io_thread.buf_size = spdk_nvme_ns_get_sector_size(g_io_thread.io_ns);

	g_io_thread.read_buf = spdk_zmalloc(g_io_thread.buf_size, 0x1000, NULL,
					    SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);

	g_io_thread.write_buf = spdk_zmalloc(g_io_thread.buf_size, 0x1000, NULL,
					     SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);

	if (!g_io_thread.write_buf || !g_io_thread.read_buf) {
		fprintf(stderr, "cannot allocate memory for I/O buffers\n");
		spdk_app_start_shutdown();
		return NULL;
	}

	g_io_thread.thread = spdk_thread_create("io_thread", NULL);
	if (g_io_thread.thread == NULL) {
		fprintf(stderr, "cannot create io thread\n");
		spdk_app_start_shutdown();
		return NULL;
	}

	spdk_thread_send_msg(g_io_thread.thread, start_io_poller, &g_io_thread);

	return NULL;
}
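
/*
 * begin_fuzz() runs on the SPDK app thread once spdk_app_start() is up: it records the
 * reactor thread, launches the libFuzzer driver in its own pthread, and, when -Y was
 * given, launches a second pthread that connects to the additional controller and sets
 * up the I/O verification path above.
 */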

static void
begin_fuzz(void *ctx)
{
	int rc = 0;

	g_reactor_td = pthread_self();

	rc = pthread_create(&g_fuzz_td, NULL, start_fuzzer, NULL);
	if (rc != 0) {
		spdk_app_stop(-1);
		return;
	}

	/* A POSIX thread is used here to avoid deadlocking in spdk_nvme_connect():
	 * vfio-user version negotiation may block while waiting for a response.
	 */
	if (g_io_thread.io_ctrlr_path) {
		rc = pthread_create(&g_io_thread.io_td, NULL, init_io, NULL);
		if (rc != 0) {
			spdk_app_start_shutdown();
		}
	}
}

static void
vfio_fuzz_usage(void)
{
	fprintf(stderr, " -D Path of corpus directory.\n");
	fprintf(stderr, " -F Path for ctrlr that should be fuzzed.\n");
	fprintf(stderr, " -N Name of reproduction data file.\n");
	fprintf(stderr, " -P Provide a prefix to use when saving artifacts.\n");
	fprintf(stderr, " -t Time to run fuzz tests (in seconds). Default: 10\n");
	fprintf(stderr, " -Y Path of an additional controller used to perform I/O.\n");
	fprintf(stderr, " -Z Fuzzer to run (0 to %lu)\n", NUM_FUZZERS - 1);
}

static int
vfio_fuzz_parse(int ch, char *arg)
{
	long long tmp = 0;

	switch (ch) {
	case 'D':
		g_corpus_dir = strdup(optarg);
		if (!g_corpus_dir) {
			fprintf(stderr, "cannot strdup: %s\n", optarg);
			return -ENOMEM;
		}
		break;
	case 'F':
		g_ctrlr_path = strdup(optarg);
		if (!g_ctrlr_path) {
			fprintf(stderr, "cannot strdup: %s\n", optarg);
			return -ENOMEM;
		}
		break;
	case 'N':
		g_repro_data = spdk_posix_file_load_from_name(optarg, &g_repro_size);
		if (g_repro_data == NULL) {
			fprintf(stderr, "could not load data for file %s\n", optarg);
			return -1;
		}
		break;
	case 'P':
		g_artifact_prefix = strdup(optarg);
		if (!g_artifact_prefix) {
			fprintf(stderr, "cannot strdup: %s\n", optarg);
			return -ENOMEM;
		}
		break;
	case 'Y':
		g_io_thread.io_ctrlr_path = strdup(optarg);
		if (!g_io_thread.io_ctrlr_path) {
			fprintf(stderr, "cannot strdup: %s\n", optarg);
			return -ENOMEM;
		}
		break;
	case 't':
	case 'Z':
		tmp = spdk_strtoll(optarg, 10);
		if (tmp < 0 || tmp >= INT_MAX) {
			fprintf(stderr, "Invalid value '%s' for option -%c.\n", optarg, ch);
			return -EINVAL;
		}
		switch (ch) {
		case 't':
			g_time_in_sec = tmp;
			break;
		case 'Z':
			if ((unsigned long)tmp >= NUM_FUZZERS) {
				fprintf(stderr, "Invalid fuzz type %lld (max %lu)\n", tmp, NUM_FUZZERS - 1);
				return -EINVAL;
			}
			g_fuzzer = &g_fuzzers[tmp];
			break;
		}
		break;
	case '?':
	default:
		return -EINVAL;
	}
	return 0;
}
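
/*
 * Example invocation (the binary name and paths below are illustrative):
 *   ./fuzz_vfio_user -D ./corpus -F /var/run/vfio-user -t 30 -Z 0
 * where -F points at the directory containing the target's "cntrl" socket. Adding
 * -N <artifact file> replays a single reproduction input instead of running libFuzzer
 * (-D, -F and -Z are still required).
 */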

static void
fuzz_shutdown(void)
{
	/* If the user terminates the fuzzer prematurely, it is likely due
	 * to an input hang. So raise a SIGSEGV signal which will cause the
	 * fuzzer to generate a crash file for the last input.
	 *
	 * Note that the fuzzer will always generate a crash file, even if
	 * we get our TestOneInput() function (which is called by the fuzzer)
	 * to pthread_exit(). So just doing the SIGSEGV here in all cases is
	 * simpler than trying to differentiate between hung inputs and
	 * an impatient user.
	 */
	spdk_app_stop(-1);

	if (g_fuzz_td) {
		fprintf(stderr, "Terminate fuzzer driver with SIGSEGV.\n");
		pthread_kill(g_fuzz_td, SIGSEGV);
	}
}

int
main(int argc, char **argv)
{
	struct spdk_app_opts opts = {};
	int rc = 0;

	spdk_app_opts_init(&opts, sizeof(opts));
	opts.name = "vfio_fuzz";
	opts.shutdown_cb = fuzz_shutdown;

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "D:F:N:P:t:Y:Z:", NULL, vfio_fuzz_parse,
				      vfio_fuzz_usage)) != SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	if (!g_corpus_dir) {
		fprintf(stderr, "Must specify corpus dir with -D option\n");
		return -1;
	}

	if (!g_ctrlr_path) {
		fprintf(stderr, "Must specify ctrlr path with -F option\n");
		return -1;
	}

	if (!g_fuzzer) {
		fprintf(stderr, "Must specify fuzzer with -Z option\n");
		return -1;
	}

	rc = spdk_app_start(&opts, begin_fuzz, NULL);

	spdk_app_fini();
	return rc;
}