1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "spdk/stdinc.h"
#include "spdk/conf.h"
#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/nvme_spec.h"
#include "spdk/nvme.h"
#include "spdk/likely.h"
#include "spdk/json.h"
#include "fuzz_common.h"

/* The NVMe opcode field is 8 bits wide, so there are 256 possible opcodes. */
#define UNIQUE_OPCODES 256

/* JSON object name used both when dumping commands and when parsing a replay file. */
const char g_nvme_cmd_json_name[] = "struct spdk_nvme_cmd";
/* -C: path to an spdk_conf config file listing TransportIDs to probe. */
char *g_conf_file;
/* -j: path to a json file of spdk_nvme_cmd objects replayed instead of random commands. */
char *g_json_file = NULL;
/* Tick value at which a timed run stops (unused when replaying from json). */
uint64_t g_runtime_ticks;
/* -S: fixed random seed; 0 means derive a per-qpair seed from the TSC. */
unsigned int g_seed_value = 0;
/* -t: runtime in seconds. */
int g_runtime;

/* Count of per-namespace threads still running; the app shuts down when it hits 0. */
int g_num_active_threads = 0;
/* Queue depths (number of preallocated request contexts) for admin and I/O qpairs. */
uint32_t g_admin_depth = 16;
uint32_t g_io_depth = 128;

/* -N: force fuzzed commands to target the namespace's real nsid. */
bool g_valid_ns_only = false;
/* -V: log every completed command, not just unexpected successes. */
bool g_verbose_mode = false;
/* -a: also fuzz admin commands. */
bool g_run_admin_commands = false;
/* Cleared when the runtime expires, the replay array is exhausted, or a submit fails. */
bool g_run;

struct spdk_poller *g_app_completion_poller;
/* Per-opcode flags recording which commands unexpectedly completed successfully. */
bool g_successful_io_opcodes[UNIQUE_OPCODES] = {0};
bool g_successful_admin_opcodes[UNIQUE_OPCODES] = {0};

/* Commands loaded from the -j json file; replayed in order when non-NULL. */
struct spdk_nvme_cmd *g_cmd_array;
size_t g_cmd_array_size;

/* I need context objects here because I need to keep track of all I/O that are in flight.
*/ 72 struct nvme_fuzz_request { 73 struct spdk_nvme_cmd cmd; 74 struct nvme_fuzz_qp *qp; 75 TAILQ_ENTRY(nvme_fuzz_request) link; 76 }; 77 78 struct nvme_fuzz_trid { 79 struct spdk_nvme_transport_id trid; 80 TAILQ_ENTRY(nvme_fuzz_trid) tailq; 81 }; 82 83 struct nvme_fuzz_ctrlr { 84 struct spdk_nvme_ctrlr *ctrlr; 85 TAILQ_ENTRY(nvme_fuzz_ctrlr) tailq; 86 }; 87 88 struct nvme_fuzz_qp { 89 struct spdk_nvme_qpair *qpair; 90 /* array of context objects equal in length to the queue depth */ 91 struct nvme_fuzz_request *req_ctx; 92 TAILQ_HEAD(, nvme_fuzz_request) free_ctx_objs; 93 TAILQ_HEAD(, nvme_fuzz_request) outstanding_ctx_objs; 94 unsigned int random_seed; 95 uint64_t completed_cmd_counter; 96 uint64_t submitted_cmd_counter; 97 uint64_t successful_completed_cmd_counter; 98 uint64_t timeout_tsc; 99 uint32_t num_cmds_outstanding; 100 bool timed_out; 101 bool is_admin; 102 }; 103 104 struct nvme_fuzz_ns { 105 struct spdk_nvme_ns *ns; 106 struct spdk_nvme_ctrlr *ctrlr; 107 struct spdk_thread *thread; 108 struct spdk_poller *req_poller; 109 struct nvme_fuzz_qp io_qp; 110 struct nvme_fuzz_qp a_qp; 111 uint32_t nsid; 112 TAILQ_ENTRY(nvme_fuzz_ns) tailq; 113 }; 114 115 static TAILQ_HEAD(, nvme_fuzz_ns) g_ns_list = TAILQ_HEAD_INITIALIZER(g_ns_list); 116 static TAILQ_HEAD(, nvme_fuzz_ctrlr) g_ctrlr_list = TAILQ_HEAD_INITIALIZER(g_ctrlr_list); 117 static TAILQ_HEAD(, nvme_fuzz_trid) g_trid_list = TAILQ_HEAD_INITIALIZER(g_trid_list); 118 119 static bool 120 parse_nvme_cmd_obj(void *item, struct spdk_json_val *value, size_t num_values) 121 { 122 struct spdk_nvme_cmd *cmd = item; 123 struct spdk_json_val *next_val; 124 uint64_t tmp_val; 125 size_t i = 0; 126 127 while (i < num_values) { 128 if (value->type == SPDK_JSON_VAL_NAME) { 129 next_val = value + 1; 130 if (!strncmp(value->start, "opc", value->len)) { 131 if (next_val->type == SPDK_JSON_VAL_NUMBER) { 132 if (fuzz_parse_json_num(next_val, UNSIGNED_8BIT_MAX, &tmp_val)) { 133 goto invalid; 134 } 135 cmd->opc = tmp_val; 136 } 
137 } else if (!strncmp(value->start, "fuse", value->len)) { 138 if (next_val->type == SPDK_JSON_VAL_NUMBER) { 139 if (fuzz_parse_json_num(next_val, UNSIGNED_2BIT_MAX, &tmp_val)) { 140 goto invalid; 141 } 142 cmd->fuse = tmp_val; 143 } 144 } else if (!strncmp(value->start, "rsvd1", value->len)) { 145 if (next_val->type == SPDK_JSON_VAL_NUMBER) { 146 if (fuzz_parse_json_num(next_val, UNSIGNED_4BIT_MAX, &tmp_val)) { 147 goto invalid; 148 } 149 cmd->rsvd1 = tmp_val; 150 } 151 } else if (!strncmp(value->start, "psdt", value->len)) { 152 if (next_val->type == SPDK_JSON_VAL_NUMBER) { 153 if (fuzz_parse_json_num(next_val, UNSIGNED_2BIT_MAX, &tmp_val)) { 154 goto invalid; 155 } 156 cmd->psdt = tmp_val; 157 } 158 } else if (!strncmp(value->start, "cid", value->len)) { 159 if (next_val->type == SPDK_JSON_VAL_NUMBER) { 160 if (fuzz_parse_json_num(next_val, UINT16_MAX, &tmp_val)) { 161 goto invalid; 162 } 163 cmd->cid = tmp_val; 164 } 165 } else if (!strncmp(value->start, "nsid", value->len)) { 166 if (next_val->type == SPDK_JSON_VAL_NUMBER) { 167 if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) { 168 goto invalid; 169 } 170 cmd->nsid = tmp_val; 171 } 172 } else if (!strncmp(value->start, "rsvd2", value->len)) { 173 if (next_val->type == SPDK_JSON_VAL_NUMBER) { 174 if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) { 175 goto invalid; 176 } 177 cmd->rsvd2 = tmp_val; 178 } 179 } else if (!strncmp(value->start, "rsvd3", value->len)) { 180 if (next_val->type == SPDK_JSON_VAL_NUMBER) { 181 if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) { 182 goto invalid; 183 } 184 cmd->rsvd3 = tmp_val; 185 } 186 } else if (!strncmp(value->start, "mptr", value->len)) { 187 if (next_val->type == SPDK_JSON_VAL_NUMBER) { 188 if (fuzz_parse_json_num(next_val, UINT64_MAX, &tmp_val)) { 189 goto invalid; 190 } 191 cmd->mptr = tmp_val; 192 } 193 } else if (!strncmp(value->start, "dptr", value->len)) { 194 if (next_val->type == SPDK_JSON_VAL_STRING) { 195 if 
(fuzz_get_base_64_buffer_value(&cmd->dptr, sizeof(cmd->dptr), (char *)next_val->start, 196 next_val->len)) { 197 goto invalid; 198 } 199 } 200 } else if (!strncmp(value->start, "cdw10", value->len)) { 201 if (next_val->type == SPDK_JSON_VAL_NUMBER) { 202 if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) { 203 goto invalid; 204 } 205 cmd->cdw10 = tmp_val; 206 } 207 } else if (!strncmp(value->start, "cdw11", value->len)) { 208 if (next_val->type == SPDK_JSON_VAL_NUMBER) { 209 if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) { 210 goto invalid; 211 } 212 cmd->cdw11 = tmp_val; 213 } 214 } else if (!strncmp(value->start, "cdw12", value->len)) { 215 if (next_val->type == SPDK_JSON_VAL_NUMBER) { 216 if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) { 217 goto invalid; 218 } 219 cmd->cdw12 = tmp_val; 220 } 221 } else if (!strncmp(value->start, "cdw13", value->len)) { 222 if (next_val->type == SPDK_JSON_VAL_NUMBER) { 223 if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) { 224 goto invalid; 225 } 226 cmd->cdw13 = tmp_val; 227 } 228 } else if (!strncmp(value->start, "cdw14", value->len)) { 229 if (next_val->type == SPDK_JSON_VAL_NUMBER) { 230 if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) { 231 goto invalid; 232 } 233 cmd->cdw14 = tmp_val; 234 } 235 } else if (!strncmp(value->start, "cdw15", value->len)) { 236 if (next_val->type == SPDK_JSON_VAL_NUMBER) { 237 if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) { 238 goto invalid; 239 } 240 cmd->cdw15 = tmp_val; 241 } 242 } 243 } 244 i++; 245 value++; 246 } 247 return true; 248 249 invalid: 250 fprintf(stderr, "Invalid value supplied for cmd->%.*s: %.*s\n", value->len, (char *)value->start, 251 next_val->len, (char *)next_val->start); 252 return false; 253 } 254 255 static void 256 report_successful_opcodes(bool *array, int length) 257 { 258 int i; 259 260 for (i = 0; i < length; i++) { 261 if (array[i] == true) { 262 printf("%d, ", i); 263 } 264 } 265 printf("\n"); 266 } 267 268 
static int 269 print_nvme_cmd(void *cb_ctx, const void *data, size_t size) 270 { 271 fprintf(stderr, "%s\n", (const char *)data); 272 return 0; 273 } 274 275 static void 276 json_dump_nvme_cmd(struct spdk_nvme_cmd *cmd) 277 { 278 struct spdk_json_write_ctx *w; 279 char *dptr_value; 280 281 dptr_value = fuzz_get_value_base_64_buffer(&cmd->dptr, sizeof(cmd->dptr)); 282 if (dptr_value == NULL) { 283 fprintf(stderr, "Unable to allocate buffer context for printing command.\n"); 284 return; 285 } 286 287 w = spdk_json_write_begin(print_nvme_cmd, cmd, SPDK_JSON_WRITE_FLAG_FORMATTED); 288 if (w == NULL) { 289 fprintf(stderr, "Unable to allocate json context for printing command.\n"); 290 free(dptr_value); 291 return; 292 } 293 294 spdk_json_write_named_object_begin(w, g_nvme_cmd_json_name); 295 spdk_json_write_named_uint32(w, "opc", cmd->opc); 296 spdk_json_write_named_uint32(w, "fuse", cmd->fuse); 297 spdk_json_write_named_uint32(w, "rsvd1", cmd->rsvd1); 298 spdk_json_write_named_uint32(w, "psdt", cmd->psdt); 299 spdk_json_write_named_uint32(w, "cid", cmd->cid); 300 spdk_json_write_named_uint32(w, "nsid", cmd->nsid); 301 spdk_json_write_named_uint32(w, "rsvd2", cmd->rsvd2); 302 spdk_json_write_named_uint32(w, "rsvd3", cmd->rsvd3); 303 spdk_json_write_named_uint32(w, "mptr", cmd->mptr); 304 spdk_json_write_named_string(w, "dptr", dptr_value); 305 spdk_json_write_named_uint32(w, "cdw10", cmd->cdw10); 306 spdk_json_write_named_uint32(w, "cdw11", cmd->cdw11); 307 spdk_json_write_named_uint32(w, "cdw12", cmd->cdw12); 308 spdk_json_write_named_uint32(w, "cdw13", cmd->cdw13); 309 spdk_json_write_named_uint32(w, "cdw14", cmd->cdw14); 310 spdk_json_write_named_uint32(w, "cdw15", cmd->cdw15); 311 spdk_json_write_object_end(w); 312 313 free(dptr_value); 314 spdk_json_write_end(w); 315 } 316 317 static void 318 json_dump_nvme_cmd_list(struct nvme_fuzz_qp *qp) 319 { 320 struct nvme_fuzz_request *ctx; 321 322 TAILQ_FOREACH(ctx, &qp->outstanding_ctx_objs, link) { 323 
json_dump_nvme_cmd(&ctx->cmd); 324 } 325 } 326 327 static void 328 handle_timeout(struct nvme_fuzz_qp *qp, bool is_admin) 329 { 330 fprintf(stderr, "An %s queue has timed out. Dumping all outstanding commands from that queue\n", 331 is_admin ? "Admin" : "I/O"); 332 json_dump_nvme_cmd_list(qp); 333 qp->timed_out = true; 334 } 335 336 static void submit_ns_cmds(struct nvme_fuzz_ns *ns_entry); 337 338 static void 339 nvme_fuzz_cpl_cb(void *cb_arg, const struct spdk_nvme_cpl *cpl) 340 { 341 struct nvme_fuzz_request *ctx = cb_arg; 342 struct nvme_fuzz_qp *qp = ctx->qp; 343 344 qp->completed_cmd_counter++; 345 if (spdk_unlikely(cpl->status.sc == SPDK_NVME_SC_SUCCESS)) { 346 fprintf(stderr, "The following %s command (command num %lu) completed successfully\n", 347 qp->is_admin ? "Admin" : "I/O", qp->completed_cmd_counter); 348 qp->successful_completed_cmd_counter++; 349 json_dump_nvme_cmd(&ctx->cmd); 350 351 if (qp->is_admin) { 352 __sync_bool_compare_and_swap(&g_successful_admin_opcodes[ctx->cmd.opc], false, true); 353 } else { 354 __sync_bool_compare_and_swap(&g_successful_io_opcodes[ctx->cmd.opc], false, true); 355 } 356 } else if (g_verbose_mode == true) { 357 fprintf(stderr, "The following %s command (command num %lu) failed as expected.\n", 358 qp->is_admin ? "Admin" : "I/O", qp->completed_cmd_counter); 359 json_dump_nvme_cmd(&ctx->cmd); 360 } 361 362 qp->timeout_tsc = fuzz_refresh_timeout(); 363 TAILQ_REMOVE(&qp->outstanding_ctx_objs, ctx, link); 364 TAILQ_INSERT_HEAD(&qp->free_ctx_objs, ctx, link); 365 assert(qp->num_cmds_outstanding > 0); 366 qp->num_cmds_outstanding--; 367 } 368 369 static int 370 poll_for_completions(void *arg) 371 { 372 struct nvme_fuzz_ns *ns_entry = arg; 373 uint64_t current_ticks = spdk_get_ticks(); 374 uint64_t *counter; 375 if (!ns_entry->io_qp.timed_out) { 376 spdk_nvme_qpair_process_completions(ns_entry->io_qp.qpair, 0); 377 /* SAlways have to process admin completions for the purposes of keep alive. 
*/ 378 spdk_nvme_ctrlr_process_admin_completions(ns_entry->ctrlr); 379 } 380 381 if (g_cmd_array) { 382 if (g_run_admin_commands) { 383 counter = &ns_entry->a_qp.submitted_cmd_counter; 384 } else { 385 counter = &ns_entry->io_qp.submitted_cmd_counter; 386 } 387 388 if (*counter >= g_cmd_array_size) { 389 g_run = false; 390 } 391 } else { 392 if (current_ticks > g_runtime_ticks) { 393 g_run = false; 394 } 395 } 396 397 if (ns_entry->a_qp.timeout_tsc < current_ticks && !ns_entry->a_qp.timed_out && 398 ns_entry->a_qp.num_cmds_outstanding > 0) { 399 handle_timeout(&ns_entry->a_qp, true); 400 } 401 402 if (ns_entry->io_qp.timeout_tsc < current_ticks && !ns_entry->io_qp.timed_out && 403 ns_entry->io_qp.num_cmds_outstanding > 0) { 404 handle_timeout(&ns_entry->io_qp, false); 405 } 406 407 submit_ns_cmds(ns_entry); 408 409 if (g_run) { 410 return 0; 411 } 412 /* 413 * We either processed all I/O properly and can shut down normally, or we 414 * had a qp time out and we need to exit without reducing the values to 0. 
415 */ 416 if (ns_entry->io_qp.num_cmds_outstanding == 0 && 417 ns_entry->a_qp.num_cmds_outstanding == 0) { 418 goto exit_handler; 419 } else if (ns_entry->io_qp.timed_out && (!g_run_admin_commands || ns_entry->a_qp.timed_out)) { 420 goto exit_handler; 421 } else { 422 return 0; 423 } 424 425 exit_handler: 426 spdk_poller_unregister(&ns_entry->req_poller); 427 __sync_sub_and_fetch(&g_num_active_threads, 1); 428 spdk_thread_exit(ns_entry->thread); 429 return 0; 430 } 431 432 static void 433 prep_nvme_cmd(struct nvme_fuzz_ns *ns_entry, struct nvme_fuzz_qp *qp, struct nvme_fuzz_request *ctx) 434 { 435 if (g_cmd_array) { 436 memcpy(&ctx->cmd, &g_cmd_array[qp->submitted_cmd_counter], sizeof(ctx->cmd)); 437 } else { 438 fuzz_fill_random_bytes((char *)&ctx->cmd, sizeof(ctx->cmd), &qp->random_seed); 439 440 if (g_valid_ns_only) { 441 ctx->cmd.nsid = ns_entry->nsid; 442 } 443 } 444 } 445 446 static int 447 submit_qp_cmds(struct nvme_fuzz_ns *ns, struct nvme_fuzz_qp *qp) 448 { 449 struct nvme_fuzz_request *ctx; 450 int rc; 451 452 if (qp->timed_out) { 453 return 0; 454 } 455 /* If we are reading from an array, we need to stop after the last one. 
*/ 456 while ((qp->submitted_cmd_counter < g_cmd_array_size || g_cmd_array_size == 0) && 457 !TAILQ_EMPTY(&qp->free_ctx_objs)) { 458 ctx = TAILQ_FIRST(&qp->free_ctx_objs); 459 do { 460 prep_nvme_cmd(ns, qp, ctx); 461 } while (qp->is_admin && ctx->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST); 462 463 TAILQ_REMOVE(&qp->free_ctx_objs, ctx, link); 464 TAILQ_INSERT_HEAD(&qp->outstanding_ctx_objs, ctx, link); 465 qp->num_cmds_outstanding++; 466 qp->submitted_cmd_counter++; 467 if (qp->is_admin) { 468 rc = spdk_nvme_ctrlr_cmd_admin_raw(ns->ctrlr, &ctx->cmd, NULL, 0, nvme_fuzz_cpl_cb, ctx); 469 } else { 470 rc = spdk_nvme_ctrlr_cmd_io_raw(ns->ctrlr, qp->qpair, &ctx->cmd, NULL, 0, nvme_fuzz_cpl_cb, ctx); 471 } 472 if (rc) { 473 return rc; 474 } 475 } 476 return 0; 477 } 478 479 static void 480 submit_ns_cmds(struct nvme_fuzz_ns *ns_entry) 481 { 482 int rc; 483 484 if (!g_run) { 485 return; 486 } 487 488 if (g_run_admin_commands) { 489 rc = submit_qp_cmds(ns_entry, &ns_entry->a_qp); 490 if (rc) { 491 goto err_exit; 492 } 493 } 494 495 if (g_cmd_array == NULL || !g_run_admin_commands) { 496 rc = submit_qp_cmds(ns_entry, &ns_entry->io_qp); 497 } 498 err_exit: 499 if (rc) { 500 /* 501 * I see the prospect of having a broken qpair on one ns as interesting 502 * enough to recommend stopping the application. 
503 */ 504 fprintf(stderr, "Unable to submit command with rc %d\n", rc); 505 g_run = false; 506 } 507 } 508 509 static void 510 free_namespaces(void) 511 { 512 struct nvme_fuzz_ns *ns, *tmp; 513 514 TAILQ_FOREACH_SAFE(ns, &g_ns_list, tailq, tmp) { 515 printf("NS: %p I/O qp, Total commands completed: %lu, total successful commands: %lu, random_seed: %u\n", 516 ns->ns, 517 ns->io_qp.completed_cmd_counter, ns->io_qp.successful_completed_cmd_counter, ns->io_qp.random_seed); 518 printf("NS: %p admin qp, Total commands completed: %lu, total successful commands: %lu, random_seed: %u\n", 519 ns->ns, 520 ns->a_qp.completed_cmd_counter, ns->a_qp.successful_completed_cmd_counter, ns->a_qp.random_seed); 521 522 TAILQ_REMOVE(&g_ns_list, ns, tailq); 523 if (ns->io_qp.qpair) { 524 spdk_nvme_ctrlr_free_io_qpair(ns->io_qp.qpair); 525 } 526 if (ns->io_qp.req_ctx) { 527 free(ns->io_qp.req_ctx); 528 } 529 if (ns->a_qp.req_ctx) { 530 free(ns->a_qp.req_ctx); 531 } 532 free(ns); 533 } 534 } 535 536 static void 537 free_controllers(void) 538 { 539 struct nvme_fuzz_ctrlr *ctrlr, *tmp; 540 struct spdk_nvme_detach_ctx *detach_ctx = NULL; 541 542 TAILQ_FOREACH_SAFE(ctrlr, &g_ctrlr_list, tailq, tmp) { 543 TAILQ_REMOVE(&g_ctrlr_list, ctrlr, tailq); 544 spdk_nvme_detach_async(ctrlr->ctrlr, &detach_ctx); 545 free(ctrlr); 546 } 547 548 while (detach_ctx && spdk_nvme_detach_poll_async(detach_ctx) == -EAGAIN) { 549 ; 550 } 551 } 552 553 static void 554 free_trids(void) 555 { 556 struct nvme_fuzz_trid *trid, *tmp; 557 558 TAILQ_FOREACH_SAFE(trid, &g_trid_list, tailq, tmp) { 559 TAILQ_REMOVE(&g_trid_list, trid, tailq); 560 free(trid); 561 } 562 } 563 564 static void 565 register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns, uint32_t nsid) 566 { 567 struct nvme_fuzz_ns *ns_entry; 568 569 ns_entry = calloc(1, sizeof(struct nvme_fuzz_ns)); 570 if (ns_entry == NULL) { 571 fprintf(stderr, "Unable to allocate an entry for a namespace\n"); 572 return; 573 } 574 575 ns_entry->ns = ns; 576 
ns_entry->ctrlr = ctrlr; 577 ns_entry->nsid = nsid; 578 579 TAILQ_INIT(&ns_entry->io_qp.free_ctx_objs); 580 TAILQ_INIT(&ns_entry->io_qp.outstanding_ctx_objs); 581 if (g_run_admin_commands) { 582 ns_entry->a_qp.qpair = NULL; 583 TAILQ_INIT(&ns_entry->a_qp.free_ctx_objs); 584 TAILQ_INIT(&ns_entry->a_qp.outstanding_ctx_objs); 585 } 586 TAILQ_INSERT_TAIL(&g_ns_list, ns_entry, tailq); 587 } 588 589 static void 590 register_ctrlr(struct spdk_nvme_ctrlr *ctrlr) 591 { 592 struct nvme_fuzz_ctrlr *ctrlr_entry; 593 uint32_t nsid; 594 struct spdk_nvme_ns *ns; 595 596 ctrlr_entry = calloc(1, sizeof(struct nvme_fuzz_ctrlr)); 597 if (ctrlr_entry == NULL) { 598 fprintf(stderr, "Unable to allocate an entry for a controller\n"); 599 return; 600 } 601 602 ctrlr_entry->ctrlr = ctrlr; 603 TAILQ_INSERT_TAIL(&g_ctrlr_list, ctrlr_entry, tailq); 604 605 for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr); nsid != 0; 606 nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) { 607 ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid); 608 if (ns == NULL) { 609 continue; 610 } 611 register_ns(ctrlr, ns, nsid); 612 } 613 } 614 615 static void 616 attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid, 617 struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts) 618 { 619 register_ctrlr(ctrlr); 620 } 621 622 static bool 623 probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid, struct spdk_nvme_ctrlr_opts *opts) 624 { 625 printf("Controller trtype %s\ttraddr %s\n", spdk_nvme_transport_id_trtype_str(trid->trtype), 626 trid->traddr); 627 628 return true; 629 } 630 631 static int 632 prep_qpair(struct nvme_fuzz_ns *ns, struct nvme_fuzz_qp *qp, uint32_t max_qdepth) 633 { 634 uint32_t i; 635 636 /* ensure that each qpair gets a unique random seed for maximum command dispersion. */ 637 638 if (g_seed_value != 0) { 639 qp->random_seed = g_seed_value; 640 } else { 641 /* Take the low 32 bits of spdk_get_ticks. This should be more granular than time(). 
*/ 642 qp->random_seed = spdk_get_ticks(); 643 } 644 645 qp->timeout_tsc = fuzz_refresh_timeout(); 646 647 qp->req_ctx = calloc(max_qdepth, sizeof(struct nvme_fuzz_request)); 648 if (qp->req_ctx == NULL) { 649 fprintf(stderr, "Unable to allocate I/O contexts for I/O qpair.\n"); 650 return -1; 651 } 652 653 for (i = 0; i < max_qdepth; i++) { 654 qp->req_ctx[i].qp = qp; 655 TAILQ_INSERT_HEAD(&qp->free_ctx_objs, &qp->req_ctx[i], link); 656 } 657 658 return 0; 659 } 660 661 static int 662 prepare_qpairs(void) 663 { 664 struct spdk_nvme_io_qpair_opts opts; 665 struct nvme_fuzz_ns *ns_entry; 666 667 TAILQ_FOREACH(ns_entry, &g_ns_list, tailq) { 668 spdk_nvme_ctrlr_get_default_io_qpair_opts(ns_entry->ctrlr, &opts, sizeof(opts)); 669 ns_entry->io_qp.qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_entry->ctrlr, &opts, sizeof(opts)); 670 if (ns_entry->io_qp.qpair == NULL) { 671 fprintf(stderr, "Unable to create a qpair for a namespace\n"); 672 return -1; 673 } 674 675 ns_entry->io_qp.is_admin = false; 676 if (prep_qpair(ns_entry, &ns_entry->io_qp, g_io_depth) != 0) { 677 fprintf(stderr, "Unable to allocate request contexts for I/O qpair.\n"); 678 return -1; 679 } 680 681 if (g_run_admin_commands) { 682 ns_entry->a_qp.is_admin = true; 683 if (prep_qpair(ns_entry, &ns_entry->a_qp, g_admin_depth) != 0) { 684 fprintf(stderr, "Unable to allocate request contexts for admin qpair.\n"); 685 return -1; 686 } 687 } 688 } 689 return 0; 690 } 691 692 static void 693 start_ns_poller(void *ctx) 694 { 695 struct nvme_fuzz_ns *ns_entry = ctx; 696 697 ns_entry->req_poller = SPDK_POLLER_REGISTER(poll_for_completions, ns_entry, 0); 698 submit_ns_cmds(ns_entry); 699 } 700 701 static int 702 check_app_completion(void *ctx) 703 { 704 705 if (g_num_active_threads <= 0) { 706 spdk_poller_unregister(&g_app_completion_poller); 707 if (g_cmd_array) { 708 free(g_cmd_array); 709 } 710 printf("Fuzzing completed. 
Shutting down the fuzz application\n\n"); 711 printf("Dumping successful admin opcodes:\n"); 712 report_successful_opcodes(g_successful_admin_opcodes, UNIQUE_OPCODES); 713 printf("Dumping successful io opcodes:\n"); 714 report_successful_opcodes(g_successful_io_opcodes, UNIQUE_OPCODES); 715 free_namespaces(); 716 free_controllers(); 717 free_trids(); 718 spdk_app_stop(0); 719 } 720 return 0; 721 } 722 723 static void 724 begin_fuzz(void *ctx) 725 { 726 struct nvme_fuzz_ns *ns_entry; 727 struct nvme_fuzz_trid *trid; 728 int rc; 729 730 if (!spdk_iommu_is_enabled()) { 731 /* Don't set rc to an error code here. We don't want to fail an automated test based on this. */ 732 fprintf(stderr, "The IOMMU must be enabled to run this program to avoid unsafe memory accesses.\n"); 733 rc = 0; 734 goto out; 735 } 736 737 TAILQ_FOREACH(trid, &g_trid_list, tailq) { 738 if (spdk_nvme_probe(&trid->trid, trid, probe_cb, attach_cb, NULL) != 0) { 739 fprintf(stderr, "spdk_nvme_probe() failed for transport address '%s'\n", 740 trid->trid.traddr); 741 rc = -1; 742 goto out; 743 } 744 } 745 746 if (TAILQ_EMPTY(&g_ns_list)) { 747 fprintf(stderr, "No valid NVMe Namespaces to fuzz\n"); 748 rc = -EINVAL; 749 goto out; 750 } 751 752 rc = prepare_qpairs(); 753 754 if (rc < 0) { 755 fprintf(stderr, "Unable to prepare the qpairs\n"); 756 goto out; 757 } 758 759 g_runtime_ticks = spdk_get_ticks() + g_runtime * spdk_get_ticks_hz(); 760 761 /* Assigning all of the threads and then starting them makes cleanup easier. 
*/ 762 TAILQ_FOREACH(ns_entry, &g_ns_list, tailq) { 763 ns_entry->thread = spdk_thread_create(NULL, NULL); 764 if (ns_entry->thread == NULL) { 765 fprintf(stderr, "Failed to allocate thread for namespace.\n"); 766 goto out; 767 } 768 } 769 770 TAILQ_FOREACH(ns_entry, &g_ns_list, tailq) { 771 spdk_thread_send_msg(ns_entry->thread, start_ns_poller, ns_entry); 772 __sync_add_and_fetch(&g_num_active_threads, 1); 773 } 774 775 g_app_completion_poller = SPDK_POLLER_REGISTER(check_app_completion, NULL, 1000000); 776 return; 777 out: 778 printf("Shutting down the fuzz application\n"); 779 free_namespaces(); 780 free_controllers(); 781 free_trids(); 782 spdk_app_stop(rc); 783 } 784 785 static int 786 parse_trids(void) 787 { 788 struct spdk_conf *config = NULL; 789 struct spdk_conf_section *sp; 790 const char *trid_char; 791 struct nvme_fuzz_trid *current_trid; 792 int num_subsystems = 0; 793 int rc = 0; 794 795 if (g_conf_file) { 796 config = spdk_conf_allocate(); 797 if (!config) { 798 fprintf(stderr, "Unable to allocate an spdk_conf object\n"); 799 return -1; 800 } 801 802 rc = spdk_conf_read(config, g_conf_file); 803 if (rc) { 804 fprintf(stderr, "Unable to convert the conf file into a readable system\n"); 805 rc = -1; 806 goto exit; 807 } 808 809 sp = spdk_conf_find_section(config, "Nvme"); 810 811 if (sp == NULL) { 812 fprintf(stderr, "No Nvme configuration in conf file\n"); 813 goto exit; 814 } 815 816 while ((trid_char = spdk_conf_section_get_nmval(sp, "TransportID", num_subsystems, 0)) != NULL) { 817 current_trid = malloc(sizeof(struct nvme_fuzz_trid)); 818 if (!current_trid) { 819 fprintf(stderr, "Unable to allocate memory for transport ID\n"); 820 rc = -1; 821 goto exit; 822 } 823 rc = spdk_nvme_transport_id_parse(¤t_trid->trid, trid_char); 824 825 if (rc < 0) { 826 fprintf(stderr, "failed to parse transport ID: %s\n", trid_char); 827 free(current_trid); 828 rc = -1; 829 goto exit; 830 } 831 TAILQ_INSERT_TAIL(&g_trid_list, current_trid, tailq); 832 
num_subsystems++; 833 } 834 } 835 836 exit: 837 if (config != NULL) { 838 spdk_conf_free(config); 839 } 840 return rc; 841 } 842 843 static void 844 nvme_fuzz_usage(void) 845 { 846 fprintf(stderr, " -a Perform admin commands. if -j is specified, \ 847 only admin commands will run. Otherwise they will be run in tandem with I/O commands.\n"); 848 fprintf(stderr, " -C <path> Path to a configuration file.\n"); 849 fprintf(stderr, 850 " -j <path> Path to a json file containing named objects of type spdk_nvme_cmd. If this option is specified, -t will be ignored.\n"); 851 fprintf(stderr, " -N Target only valid namespace with commands. \ 852 This helps dig deeper into other errors besides invalid namespace.\n"); 853 fprintf(stderr, " -S <integer> Seed value for test.\n"); 854 fprintf(stderr, 855 " -t <integer> Time in seconds to run the fuzz test. Only valid if -j is not specified.\n"); 856 fprintf(stderr, " -V Enable logging of each submitted command.\n"); 857 } 858 859 static int 860 nvme_fuzz_parse(int ch, char *arg) 861 { 862 int64_t error_test; 863 864 switch (ch) { 865 case 'a': 866 g_run_admin_commands = true; 867 break; 868 case 'C': 869 g_conf_file = optarg; 870 break; 871 case 'j': 872 g_json_file = optarg; 873 break; 874 case 'N': 875 g_valid_ns_only = true; 876 break; 877 case 'S': 878 error_test = spdk_strtol(arg, 10); 879 if (error_test < 0) { 880 fprintf(stderr, "Invalid value supplied for the random seed.\n"); 881 return -1; 882 } else { 883 g_seed_value = error_test; 884 } 885 break; 886 case 't': 887 g_runtime = spdk_strtol(optarg, 10); 888 if (g_runtime < 0 || g_runtime > MAX_RUNTIME_S) { 889 fprintf(stderr, "You must supply a positive runtime value less than 86401.\n"); 890 return -1; 891 } 892 break; 893 case 'V': 894 g_verbose_mode = true; 895 break; 896 case '?': 897 default: 898 return -EINVAL; 899 } 900 return 0; 901 } 902 903 int 904 main(int argc, char **argv) 905 { 906 struct spdk_app_opts opts = {}; 907 int rc; 908 909 
spdk_app_opts_init(&opts); 910 opts.name = "nvme_fuzz"; 911 912 g_runtime = DEFAULT_RUNTIME; 913 g_run = true; 914 915 if ((rc = spdk_app_parse_args(argc, argv, &opts, "aC:j:NS:t:V", NULL, nvme_fuzz_parse, 916 nvme_fuzz_usage) != SPDK_APP_PARSE_ARGS_SUCCESS)) { 917 return rc; 918 } 919 920 if (g_conf_file) { 921 parse_trids(); 922 } 923 924 if (g_json_file != NULL) { 925 g_cmd_array_size = fuzz_parse_args_into_array(g_json_file, (void **)&g_cmd_array, 926 sizeof(struct spdk_nvme_cmd), g_nvme_cmd_json_name, parse_nvme_cmd_obj); 927 if (g_cmd_array_size == 0) { 928 fprintf(stderr, "The provided json file did not contain any valid commands. Exiting."); 929 return -EINVAL; 930 } 931 } 932 933 rc = spdk_app_start(&opts, begin_fuzz, NULL); 934 935 return rc; 936 } 937