/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation. All rights reserved.
 */
#include "spdk/stdinc.h"
#include "spdk/conf.h"
#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/nvme_spec.h"
#include "spdk/nvme.h"
#include "spdk/likely.h"
#include "spdk/file.h"

#include "spdk/vfio_user_pci.h"
#include <linux/vfio.h>
#include "spdk/vfio_user_spec.h"

#define VFIO_MAXIMUM_SPARSE_MMAP_REGIONS	8

typedef int (*fuzzer_fn)(const uint8_t *data, size_t size, struct vfio_device *dev);
struct fuzz_type {
	fuzzer_fn	fn;
	uint32_t	bytes_per_cmd;
};

#define VFIO_USER_MAX_PAYLOAD_SIZE	(4096)
static uint8_t payload[VFIO_USER_MAX_PAYLOAD_SIZE];

static char *g_ctrlr_path;
static char *g_artifact_prefix;
static int32_t g_time_in_sec = 10;
static char *g_corpus_dir;
static uint8_t *g_repro_data;
static size_t g_repro_size;
static pthread_t g_fuzz_td;
static pthread_t g_reactor_td;
static struct fuzz_type *g_fuzzer;

enum IO_POLLER_STATE {
	IO_POLLER_STATE_IDLE,
	IO_POLLER_STATE_PROCESSING,
	IO_POLLER_STATE_TERMINATE_INIT,
	IO_POLLER_STATE_TERMINATE_WAIT,
	IO_POLLER_STATE_TERMINATE_DONE,
};

struct io_thread {
	enum IO_POLLER_STATE	state;
	int			lba_num;
	char			*write_buf;
	char			*read_buf;
	size_t			buf_size;
	struct spdk_poller	*run_poller;
	struct spdk_thread	*thread;
	struct spdk_nvme_ctrlr	*io_ctrlr;
	pthread_t		io_td;
	pthread_t		term_td;
	struct spdk_nvme_ns	*io_ns;
	struct spdk_nvme_qpair	*io_qpair;
	char			*io_ctrlr_path;
} g_io_thread;

static int
fuzz_vfio_user_version(const uint8_t *data, size_t size, struct vfio_device *dev)
{
	struct vfio_user_version *version = (struct vfio_user_version *)payload;

	version->major = ((uint16_t)data[0] << 8) + (uint16_t)data[1];
	version->minor = ((uint16_t)data[2] << 8) + (uint16_t)data[3];

	return spdk_vfio_user_dev_send_request(dev, VFIO_USER_VERSION, payload,
					       sizeof(struct vfio_user_version),
					       sizeof(payload), NULL, 0);
}

static int
fuzz_vfio_user_region_rw(const uint8_t *data, size_t size, struct vfio_device *dev)
{
	uint8_t buf[4];
	uint64_t offset = 0;

	offset = ((uint64_t)data[0] << 8) + (uint64_t)data[1];
	offset = (SPDK_ALIGN_FLOOR(offset, 4)) % 4096;
	memcpy(buf, &data[2], sizeof(buf));

	/* What a write to BAR0 does depends on the register it hits, so the
	 * return value of the write access is intentionally not checked.
	 */
	spdk_vfio_user_pci_bar_access(dev, VFIO_PCI_BAR0_REGION_INDEX, offset, sizeof(buf),
				      &buf, true);
	return spdk_vfio_user_pci_bar_access(dev, VFIO_PCI_BAR0_REGION_INDEX, offset, sizeof(buf),
					     &buf, false);
}

static struct fuzz_type g_fuzzers[] = {
	{ .fn = fuzz_vfio_user_region_rw,	.bytes_per_cmd = 6},
	{ .fn = fuzz_vfio_user_version,		.bytes_per_cmd = 4},
	{ .fn = NULL,				.bytes_per_cmd = 0}
};

#define NUM_FUZZERS	(SPDK_COUNTOF(g_fuzzers) - 1)

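/* libFuzzer entry point, invoked once per generated input: reject inputs that
 * are shorter than the selected fuzzer's command size, verify that the
 * vfio-user controller socket exists, connect a vfio-user client to it, feed
 * the input to the selected fuzzer callback and tear the connection down.
 */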
static int
TestOneInput(const uint8_t *data, size_t size)
{
	struct vfio_device *dev = NULL;
	char ctrlr_path[PATH_MAX];
	int ret = 0;

	if (size < g_fuzzer->bytes_per_cmd) {
		return -1;
	}

	snprintf(ctrlr_path, sizeof(ctrlr_path), "%s/cntrl", g_ctrlr_path);
	ret = access(ctrlr_path, F_OK);
	if (ret != 0) {
		fprintf(stderr, "Cannot access controller path %s\n", ctrlr_path);
		spdk_app_stop(-1);
		return -1;
	}

	dev = spdk_vfio_user_setup(ctrlr_path);
	if (dev == NULL) {
		fprintf(stderr, "spdk_vfio_user_setup() failed for controller path '%s'\n",
			ctrlr_path);
		spdk_app_stop(-1);
		return -1;
	}

	/* run cmds here */
	if (g_fuzzer->fn != NULL) {
		g_fuzzer->fn(data, size, dev);
	}

	spdk_vfio_user_release(dev);
	return 0;
}

int LLVMFuzzerRunDriver(int *argc, char ***argv, int (*UserCb)(const uint8_t *Data, size_t Size));

static void
io_terminate(void *ctx)
{
	((struct io_thread *)ctx)->state = IO_POLLER_STATE_TERMINATE_INIT;
}

static void
exit_handler(void)
{
	if (g_io_thread.io_ctrlr_path && g_io_thread.thread) {
		spdk_thread_send_msg(g_io_thread.thread, io_terminate, &g_io_thread);
	} else {
		spdk_app_stop(0);
	}

	pthread_join(g_reactor_td, NULL);
}

static void *
start_fuzzer(void *ctx)
{
	char *_argv[] = {
		"spdk",
		"-len_control=0",
		"-detect_leaks=1",
		NULL,
		NULL,
		NULL,
		NULL
	};
	char time_str[128];
	char prefix[PATH_MAX];
	char len_str[128];
	char **argv = _argv;
	int argc = SPDK_COUNTOF(_argv);
	uint32_t len = 0;

	spdk_unaffinitize_thread();
	snprintf(prefix, sizeof(prefix), "-artifact_prefix=%s", g_artifact_prefix);
	argv[argc - 4] = prefix;
	len = 10 * g_fuzzer->bytes_per_cmd;
	snprintf(len_str, sizeof(len_str), "-max_len=%d", len);
	argv[argc - 3] = len_str;
	snprintf(time_str, sizeof(time_str), "-max_total_time=%d", g_time_in_sec);
	argv[argc - 2] = time_str;
	argv[argc - 1] = g_corpus_dir;

	atexit(exit_handler);

	free(g_artifact_prefix);

	if (g_repro_data) {
		printf("Running single test based on reproduction data file.\n");
		TestOneInput(g_repro_data, g_repro_size);
		printf("Done.\n");
	} else {
		LLVMFuzzerRunDriver(&argc, &argv, TestOneInput);
		/* TODO: in the normal case, LLVMFuzzerRunDriver never returns - it calls exit()
		 * directly and we never get here. But this behavior isn't really documented
		 * anywhere by LLVM.
		 */
	}

	return NULL;
}

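/* Completion callbacks for the optional I/O thread (-Y). Each cycle writes a
 * rand_r()-generated pattern to one LBA, reads it back and compares the two
 * buffers; any completion error or data mismatch is escalated by sending
 * SIGSEGV to the fuzzer thread so that libFuzzer records the offending input
 * as a crash.
 */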
static void
read_complete(void *arg, const struct spdk_nvme_cpl *completion)
{
	int sectors_num = 0;
	struct io_thread *io = (struct io_thread *)arg;

	if (spdk_nvme_cpl_is_error(completion)) {
		spdk_nvme_qpair_print_completion(io->io_qpair, (struct spdk_nvme_cpl *)completion);
		fprintf(stderr, "I/O read error status: %s\n",
			spdk_nvme_cpl_get_status_string(&completion->status));
		io->state = IO_POLLER_STATE_TERMINATE_WAIT;
		pthread_kill(g_fuzz_td, SIGSEGV);
		return;
	}

	if (memcmp(io->read_buf, io->write_buf, io->buf_size)) {
		fprintf(stderr, "I/O corrupt: read data does not match written data\n");
		io->state = IO_POLLER_STATE_TERMINATE_WAIT;
		pthread_kill(g_fuzz_td, SIGSEGV);
		return;
	}

	sectors_num = spdk_nvme_ns_get_num_sectors(io->io_ns);
	io->lba_num = (io->lba_num + 1) % sectors_num;
	if (io->state != IO_POLLER_STATE_TERMINATE_INIT) {
		io->state = IO_POLLER_STATE_IDLE;
	}
}

static void
write_complete(void *arg, const struct spdk_nvme_cpl *completion)
{
	int rc = 0;
	struct io_thread *io = (struct io_thread *)arg;

	if (spdk_nvme_cpl_is_error(completion)) {
		spdk_nvme_qpair_print_completion(io->io_qpair,
						 (struct spdk_nvme_cpl *)completion);
		fprintf(stderr, "I/O write error status: %s\n",
			spdk_nvme_cpl_get_status_string(&completion->status));
		io->state = IO_POLLER_STATE_TERMINATE_WAIT;
		pthread_kill(g_fuzz_td, SIGSEGV);
		return;
	}

	rc = spdk_nvme_ns_cmd_read(io->io_ns, io->io_qpair,
				   io->read_buf, io->lba_num, 1,
				   read_complete, io, 0);
	if (rc != 0) {
		fprintf(stderr, "starting read I/O failed\n");
		io->state = IO_POLLER_STATE_TERMINATE_WAIT;
		pthread_kill(g_fuzz_td, SIGSEGV);
	}
}

static void *
terminate_io_thread(void *ctx)
{
	struct io_thread *io = (struct io_thread *)ctx;

	spdk_nvme_ctrlr_free_io_qpair(io->io_qpair);
	spdk_nvme_detach(io->io_ctrlr);
	spdk_free(io->write_buf);
	spdk_free(io->read_buf);

	io->state = IO_POLLER_STATE_TERMINATE_DONE;

	return NULL;
}

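/* Poller driving the I/O state machine on the SPDK io_thread:
 *   IDLE            -> submit the next write and move to PROCESSING
 *   PROCESSING      -> poll the qpair for completions
 *   TERMINATE_INIT  -> drain outstanding requests, then spawn terminate_io_thread
 *   TERMINATE_WAIT  -> wait for the termination thread to finish cleanup
 *   TERMINATE_DONE  -> unregister the poller, exit the thread and stop the app
 */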
static int
io_poller(void *ctx)
{
	int ret = 0;
	struct io_thread *io = (struct io_thread *)ctx;
	size_t i;
	unsigned int seed = 0;
	int *write_buf = (int *)io->write_buf;

	switch (io->state) {
	case IO_POLLER_STATE_IDLE:
		break;
	case IO_POLLER_STATE_PROCESSING:
		spdk_nvme_qpair_process_completions(io->io_qpair, 0);
		return SPDK_POLLER_BUSY;
	case IO_POLLER_STATE_TERMINATE_INIT:
		if (spdk_nvme_qpair_get_num_outstanding_reqs(io->io_qpair) > 0) {
			spdk_nvme_qpair_process_completions(io->io_qpair, 0);
			return SPDK_POLLER_BUSY;
		}

		io->state = IO_POLLER_STATE_TERMINATE_WAIT;
		ret = pthread_create(&io->term_td, NULL, terminate_io_thread, ctx);
		if (ret != 0) {
			abort();
		}
		return SPDK_POLLER_BUSY;
	case IO_POLLER_STATE_TERMINATE_WAIT:
		return SPDK_POLLER_BUSY;
	case IO_POLLER_STATE_TERMINATE_DONE:
		spdk_poller_unregister(&io->run_poller);
		spdk_thread_exit(spdk_get_thread());
		spdk_app_stop(0);
		return SPDK_POLLER_IDLE;
	default:
		break;
	}

	io->state = IO_POLLER_STATE_PROCESSING;

	/* Compiler should optimize the "/ sizeof(int)" into a right shift. */
	for (i = 0; i < io->buf_size / sizeof(int); i++) {
		write_buf[i] = rand_r(&seed);
	}

	ret = spdk_nvme_ns_cmd_write(io->io_ns, io->io_qpair,
				     io->write_buf, io->lba_num, 1,
				     write_complete, io, 0);
	if (ret < 0) {
		fprintf(stderr, "starting write I/O failed\n");
		pthread_kill(g_fuzz_td, SIGSEGV);
		return SPDK_POLLER_IDLE;
	}

	return SPDK_POLLER_IDLE;
}

static void
start_io_poller(void *ctx)
{
	struct io_thread *io = (struct io_thread *)ctx;

	io->run_poller = SPDK_POLLER_REGISTER(io_poller, ctx, 0);
	if (io->run_poller == NULL) {
		fprintf(stderr, "Failed to register a poller for IO.\n");
		spdk_app_stop(-1);
		pthread_kill(g_fuzz_td, SIGSEGV);
	}
}

static void *
init_io(void *ctx)
{
	struct spdk_nvme_transport_id trid = {};
	int nsid = 0;

	snprintf(trid.traddr, sizeof(trid.traddr), "%s", g_io_thread.io_ctrlr_path);

	trid.trtype = SPDK_NVME_TRANSPORT_VFIOUSER;
	g_io_thread.io_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
	if (g_io_thread.io_ctrlr == NULL) {
		fprintf(stderr, "spdk_nvme_connect() failed for transport address '%s'\n",
			trid.traddr);
		spdk_app_stop(-1);
		pthread_kill(g_fuzz_td, SIGSEGV);
		return NULL;
	}

	g_io_thread.io_qpair = spdk_nvme_ctrlr_alloc_io_qpair(g_io_thread.io_ctrlr, NULL, 0);
	if (g_io_thread.io_qpair == NULL) {
		spdk_nvme_detach(g_io_thread.io_ctrlr);
		fprintf(stderr, "spdk_nvme_ctrlr_alloc_io_qpair failed\n");
		spdk_app_stop(-1);
		pthread_kill(g_fuzz_td, SIGSEGV);
		return NULL;
	}

	if (spdk_nvme_ctrlr_get_num_ns(g_io_thread.io_ctrlr) == 0) {
		fprintf(stderr, "no namespaces for IO\n");
		spdk_app_stop(-1);
		pthread_kill(g_fuzz_td, SIGSEGV);
		return NULL;
	}

	nsid = spdk_nvme_ctrlr_get_first_active_ns(g_io_thread.io_ctrlr);
	g_io_thread.io_ns = spdk_nvme_ctrlr_get_ns(g_io_thread.io_ctrlr, nsid);
	if (!g_io_thread.io_ns) {
		fprintf(stderr, "no io_ns for IO\n");
		spdk_app_stop(-1);
		pthread_kill(g_fuzz_td, SIGSEGV);
		return NULL;
	}

	g_io_thread.buf_size = spdk_nvme_ns_get_sector_size(g_io_thread.io_ns);

	g_io_thread.read_buf = spdk_zmalloc(g_io_thread.buf_size, 0x1000, NULL,
					    SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);

	g_io_thread.write_buf = spdk_zmalloc(g_io_thread.buf_size, 0x1000, NULL,
					     SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);

	if (!g_io_thread.write_buf || !g_io_thread.read_buf) {
		fprintf(stderr, "cannot allocate memory for I/O buffers\n");
		spdk_app_stop(-1);
		pthread_kill(g_fuzz_td, SIGSEGV);
		return NULL;
	}

	g_io_thread.thread = spdk_thread_create("io_thread", NULL);
	if (g_io_thread.thread == NULL) {
		fprintf(stderr, "cannot create io thread\n");
		spdk_app_stop(-1);
		pthread_kill(g_fuzz_td, SIGSEGV);
		return NULL;
	}

	spdk_thread_send_msg(g_io_thread.thread, start_io_poller, &g_io_thread);

	return NULL;
}

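/* First callback run on the SPDK app thread: record the reactor thread, start
 * the libFuzzer loop on its own POSIX thread and, when -Y was supplied, start
 * a second POSIX thread that connects to the additional controller for I/O.
 */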
static void
begin_fuzz(void *ctx)
{
	int rc = 0;

	g_reactor_td = pthread_self();

	rc = pthread_create(&g_fuzz_td, NULL, start_fuzzer, NULL);
	if (rc != 0) {
		spdk_app_stop(-1);
		return;
	}

	/* A separate POSIX thread is used to avoid deadlocking in spdk_nvme_connect():
	 * vfio-user version negotiation may block while waiting for a response.
	 */
	if (g_io_thread.io_ctrlr_path) {
		rc = pthread_create(&g_io_thread.io_td, NULL, init_io, NULL);
		if (rc != 0) {
			spdk_app_stop(-1);
			pthread_kill(g_fuzz_td, SIGSEGV);
		}
	}
}

static void
vfio_fuzz_usage(void)
{
	fprintf(stderr, " -D        Path of corpus directory.\n");
	fprintf(stderr, " -F        Path for ctrlr that should be fuzzed.\n");
	fprintf(stderr, " -N        Name of reproduction data file.\n");
	fprintf(stderr, " -P        Provide a prefix to use when saving artifacts.\n");
	fprintf(stderr, " -t        Time to run fuzz tests (in seconds). Default: 10\n");
	fprintf(stderr, " -Y        Path of an additional controller used to perform I/O.\n");
	fprintf(stderr, " -Z        Fuzzer to run (0 to %lu)\n", NUM_FUZZERS - 1);
}

static int
vfio_fuzz_parse(int ch, char *arg)
{
	long long tmp = 0;
	FILE *repro_file = NULL;

	switch (ch) {
	case 'D':
		g_corpus_dir = strdup(optarg);
		if (!g_corpus_dir) {
			fprintf(stderr, "cannot strdup: %s\n", optarg);
			return -ENOMEM;
		}
		break;
	case 'F':
		g_ctrlr_path = strdup(optarg);
		if (!g_ctrlr_path) {
			fprintf(stderr, "cannot strdup: %s\n", optarg);
			return -ENOMEM;
		}
		break;
	case 'N':
		repro_file = fopen(optarg, "r");
		if (repro_file == NULL) {
			fprintf(stderr, "could not open %s: %s\n", optarg, spdk_strerror(errno));
			return -1;
		}
		g_repro_data = spdk_posix_file_load(repro_file, &g_repro_size);
		if (g_repro_data == NULL) {
			fprintf(stderr, "could not load data for file %s\n", optarg);
			return -1;
		}
		break;
	case 'P':
		g_artifact_prefix = strdup(optarg);
		if (!g_artifact_prefix) {
			fprintf(stderr, "cannot strdup: %s\n", optarg);
			return -ENOMEM;
		}
		break;
	case 'Y':
		g_io_thread.io_ctrlr_path = strdup(optarg);
		if (!g_io_thread.io_ctrlr_path) {
			fprintf(stderr, "cannot strdup: %s\n", optarg);
			return -ENOMEM;
		}
		break;
	case 't':
	case 'Z':
		tmp = spdk_strtoll(optarg, 10);
		if (tmp < 0 || tmp >= INT_MAX) {
			fprintf(stderr, "Invalid value '%s' for option -%c.\n", optarg, ch);
			return -EINVAL;
		}
		switch (ch) {
		case 't':
			g_time_in_sec = tmp;
			break;
		case 'Z':
			if ((unsigned long)tmp >= NUM_FUZZERS) {
				fprintf(stderr, "Invalid fuzz type %lld (max %lu)\n", tmp, NUM_FUZZERS - 1);
				return -EINVAL;
			}
			g_fuzzer = &g_fuzzers[tmp];
			break;
		}
		break;
	case '?':
	default:
		return -EINVAL;
	}
	return 0;
}

static void
fuzz_shutdown(void)
{
	/* If the user terminates the fuzzer prematurely, it is likely due
	 * to an input hang. So raise a SIGSEGV signal which will cause the
	 * fuzzer to generate a crash file for the last input.
	 *
	 * Note that the fuzzer will always generate a crash file, even if
	 * we get our TestOneInput() function (which is called by the fuzzer)
	 * to pthread_exit(). So just doing the SIGSEGV here in all cases is
	 * simpler than trying to differentiate between hung inputs and
	 * an impatient user.
	 */
	pthread_kill(g_fuzz_td, SIGSEGV);
}

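/* Example invocation (binary name and paths are illustrative and depend on the
 * local setup):
 *
 *   ./fuzz_vfio_user -D /path/to/corpus -F /var/run/vfio-user/ctrlr -t 30 -Z 0
 *
 * -F points at the directory containing the vfio-user controller's "cntrl"
 * socket, -Z selects an entry in g_fuzzers and -t bounds the total run time.
 */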
int
main(int argc, char **argv)
{
	struct spdk_app_opts opts = {};
	int rc = 0;

	spdk_app_opts_init(&opts, sizeof(opts));
	opts.name = "vfio_fuzz";
	opts.shutdown_cb = fuzz_shutdown;

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "D:F:N:P:t:Y:Z:", NULL, vfio_fuzz_parse,
				      vfio_fuzz_usage)) != SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	if (!g_corpus_dir) {
		fprintf(stderr, "Must specify corpus dir with -D option\n");
		return -1;
	}

	if (!g_ctrlr_path) {
		fprintf(stderr, "Must specify ctrlr path with -F option\n");
		return -1;
	}

	if (!g_fuzzer) {
		fprintf(stderr, "Must specify fuzzer with -Z option\n");
		return -1;
	}

	rc = spdk_app_start(&opts, begin_fuzz, NULL);

	spdk_app_fini();
	return rc;
}