/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/conf.h"
#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/nvme_spec.h"
#include "spdk/nvme.h"
#include "spdk/likely.h"
#include "spdk/json.h"
#include "fuzz_common.h"

#define UNIQUE_OPCODES 256

const char g_nvme_cmd_json_name[] = "struct spdk_nvme_cmd";
char *g_conf_file;
char *g_json_file = NULL;
uint64_t g_runtime_ticks;
unsigned int g_seed_value = 0;
int g_runtime;

int g_num_active_threads = 0;
uint32_t g_admin_depth = 16;
uint32_t g_io_depth = 128;

bool g_valid_ns_only = false;
bool g_verbose_mode = false;
bool g_run_admin_commands = false;
bool g_run;

struct spdk_poller *g_app_completion_poller;
bool g_successful_io_opcodes[UNIQUE_OPCODES] = {0};
bool g_successful_admin_opcodes[UNIQUE_OPCODES] = {0};

struct spdk_nvme_cmd *g_cmd_array;
size_t g_cmd_array_size;

/* I need context objects here because I need to keep track of all I/O that are in flight. */
struct nvme_fuzz_request {
	struct spdk_nvme_cmd cmd;
	struct nvme_fuzz_qp *qp;
	TAILQ_ENTRY(nvme_fuzz_request) link;
};

struct nvme_fuzz_trid {
	struct spdk_nvme_transport_id trid;
	TAILQ_ENTRY(nvme_fuzz_trid) tailq;
};

struct nvme_fuzz_ctrlr {
	struct spdk_nvme_ctrlr *ctrlr;
	TAILQ_ENTRY(nvme_fuzz_ctrlr) tailq;
};

struct nvme_fuzz_qp {
	struct spdk_nvme_qpair *qpair;
	/* array of context objects equal in length to the queue depth */
	struct nvme_fuzz_request *req_ctx;
	TAILQ_HEAD(, nvme_fuzz_request) free_ctx_objs;
	TAILQ_HEAD(, nvme_fuzz_request) outstanding_ctx_objs;
	unsigned int random_seed;
	uint64_t completed_cmd_counter;
	uint64_t submitted_cmd_counter;
	uint64_t successful_completed_cmd_counter;
	uint64_t timeout_tsc;
	uint32_t num_cmds_outstanding;
	bool timed_out;
	bool is_admin;
};

struct nvme_fuzz_ns {
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_thread *thread;
	struct spdk_poller *req_poller;
	struct nvme_fuzz_qp io_qp;
	struct nvme_fuzz_qp a_qp;
	uint32_t nsid;
	TAILQ_ENTRY(nvme_fuzz_ns) tailq;
};

static TAILQ_HEAD(, nvme_fuzz_ns) g_ns_list = TAILQ_HEAD_INITIALIZER(g_ns_list);
static TAILQ_HEAD(, nvme_fuzz_ctrlr) g_ctrlr_list = TAILQ_HEAD_INITIALIZER(g_ctrlr_list);
static TAILQ_HEAD(, nvme_fuzz_trid) g_trid_list = TAILQ_HEAD_INITIALIZER(g_trid_list);

static bool
parse_nvme_cmd_obj(void *item, struct spdk_json_val *value, size_t num_values)
{
	struct spdk_nvme_cmd *cmd = item;
	struct spdk_json_val *next_val;
	uint64_t tmp_val;
	size_t i = 0;

	while (i < num_values) {
		if (value->type == SPDK_JSON_VAL_NAME) {
			next_val = value + 1;
			if (!strncmp(value->start, "opc", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UNSIGNED_8BIT_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->opc = tmp_val;
				}
			} else if (!strncmp(value->start, "fuse", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UNSIGNED_2BIT_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->fuse = tmp_val;
				}
			} else if (!strncmp(value->start, "rsvd1", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UNSIGNED_4BIT_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->rsvd1 = tmp_val;
				}
			} else if (!strncmp(value->start, "psdt", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UNSIGNED_2BIT_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->psdt = tmp_val;
				}
			} else if (!strncmp(value->start, "cid", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT16_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->cid = tmp_val;
				}
			} else if (!strncmp(value->start, "nsid", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->nsid = tmp_val;
				}
			} else if (!strncmp(value->start, "rsvd2", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->rsvd2 = tmp_val;
				}
			} else if (!strncmp(value->start, "rsvd3", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->rsvd3 = tmp_val;
				}
			} else if (!strncmp(value->start, "mptr", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT64_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->mptr = tmp_val;
				}
			} else if (!strncmp(value->start, "dptr", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_STRING) {
					if (fuzz_get_base_64_buffer_value(&cmd->dptr, sizeof(cmd->dptr), (char *)next_val->start,
									  next_val->len)) {
						goto invalid;
					}
				}
			} else if (!strncmp(value->start, "cdw10", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->cdw10 = tmp_val;
				}
			} else if (!strncmp(value->start, "cdw11", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->cdw11 = tmp_val;
				}
			} else if (!strncmp(value->start, "cdw12", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->cdw12 = tmp_val;
				}
			} else if (!strncmp(value->start, "cdw13", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->cdw13 = tmp_val;
				}
			} else if (!strncmp(value->start, "cdw14", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->cdw14 = tmp_val;
				}
			} else if (!strncmp(value->start, "cdw15", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->cdw15 = tmp_val;
				}
			}
		}
		i++;
		value++;
	}
	return true;

invalid:
	fprintf(stderr, "Invalid value supplied for cmd->%.*s: %.*s\n", value->len, (char *)value->start,
		next_val->len, (char *)next_val->start);
	return false;
}

static void
report_successful_opcodes(bool *array, int length)
{
	int i;

	for (i = 0; i < length; i++) {
		if (array[i] == true) {
			printf("%d, ", i);
		}
	}
	printf("\n");
}

static int
print_nvme_cmd(void *cb_ctx, const void *data, size_t size)
{
	fprintf(stderr, "%s\n", (const char *)data);
	return 0;
}

static void
json_dump_nvme_cmd(struct spdk_nvme_cmd *cmd)
{
	struct spdk_json_write_ctx *w;
	char *dptr_value;

	dptr_value = fuzz_get_value_base_64_buffer(&cmd->dptr, sizeof(cmd->dptr));
	if (dptr_value == NULL) {
		fprintf(stderr, "Unable to allocate buffer context for printing command.\n");
		return;
	}

	w = spdk_json_write_begin(print_nvme_cmd, cmd, SPDK_JSON_WRITE_FLAG_FORMATTED);
	if (w == NULL) {
		fprintf(stderr, "Unable to allocate json context for printing command.\n");
		free(dptr_value);
		return;
	}

	spdk_json_write_named_object_begin(w, g_nvme_cmd_json_name);
	spdk_json_write_named_uint32(w, "opc", cmd->opc);
	spdk_json_write_named_uint32(w, "fuse", cmd->fuse);
	spdk_json_write_named_uint32(w, "rsvd1", cmd->rsvd1);
	spdk_json_write_named_uint32(w, "psdt", cmd->psdt);
	spdk_json_write_named_uint32(w, "cid", cmd->cid);
	spdk_json_write_named_uint32(w, "nsid", cmd->nsid);
	spdk_json_write_named_uint32(w, "rsvd2", cmd->rsvd2);
	spdk_json_write_named_uint32(w, "rsvd3", cmd->rsvd3);
	spdk_json_write_named_uint32(w, "mptr", cmd->mptr);
	spdk_json_write_named_string(w, "dptr", dptr_value);
	spdk_json_write_named_uint32(w, "cdw10", cmd->cdw10);
	spdk_json_write_named_uint32(w, "cdw11", cmd->cdw11);
	spdk_json_write_named_uint32(w, "cdw12", cmd->cdw12);
	spdk_json_write_named_uint32(w, "cdw13", cmd->cdw13);
	spdk_json_write_named_uint32(w, "cdw14", cmd->cdw14);
	spdk_json_write_named_uint32(w, "cdw15", cmd->cdw15);
	spdk_json_write_object_end(w);

	free(dptr_value);
	spdk_json_write_end(w);
}

static void
json_dump_nvme_cmd_list(struct nvme_fuzz_qp *qp)
{
	struct nvme_fuzz_request *ctx;

	TAILQ_FOREACH(ctx, &qp->outstanding_ctx_objs, link) {
		json_dump_nvme_cmd(&ctx->cmd);
	}
}

static void
handle_timeout(struct nvme_fuzz_qp *qp, bool is_admin)
{
	fprintf(stderr, "An %s queue has timed out. Dumping all outstanding commands from that queue\n",
		is_admin ? "Admin" : "I/O");
	json_dump_nvme_cmd_list(qp);
	qp->timed_out = true;
}

static void submit_ns_cmds(struct nvme_fuzz_ns *ns_entry);

static void
nvme_fuzz_cpl_cb(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_fuzz_request *ctx = cb_arg;
	struct nvme_fuzz_qp *qp = ctx->qp;

	qp->completed_cmd_counter++;
	if (spdk_unlikely(cpl->status.sc == SPDK_NVME_SC_SUCCESS)) {
		fprintf(stderr, "The following %s command (command num %lu) completed successfully\n",
			qp->is_admin ? "Admin" : "I/O", qp->completed_cmd_counter);
		qp->successful_completed_cmd_counter++;
		json_dump_nvme_cmd(&ctx->cmd);

		if (qp->is_admin) {
			__sync_bool_compare_and_swap(&g_successful_admin_opcodes[ctx->cmd.opc], false, true);
		} else {
			__sync_bool_compare_and_swap(&g_successful_io_opcodes[ctx->cmd.opc], false, true);
		}
	} else if (g_verbose_mode == true) {
		fprintf(stderr, "The following %s command (command num %lu) failed as expected.\n",
			qp->is_admin ? "Admin" : "I/O", qp->completed_cmd_counter);
		json_dump_nvme_cmd(&ctx->cmd);
	}

	qp->timeout_tsc = fuzz_refresh_timeout();
	TAILQ_REMOVE(&qp->outstanding_ctx_objs, ctx, link);
	TAILQ_INSERT_HEAD(&qp->free_ctx_objs, ctx, link);
	assert(qp->num_cmds_outstanding > 0);
	qp->num_cmds_outstanding--;
}

static int
poll_for_completions(void *arg)
{
	struct nvme_fuzz_ns *ns_entry = arg;
	uint64_t current_ticks = spdk_get_ticks();
	uint64_t *counter;

	if (!ns_entry->io_qp.timed_out) {
		spdk_nvme_qpair_process_completions(ns_entry->io_qp.qpair, 0);
		/* Always have to process admin completions for the purposes of keep alive. */
		spdk_nvme_ctrlr_process_admin_completions(ns_entry->ctrlr);
	}

	if (g_cmd_array) {
		if (g_run_admin_commands) {
			counter = &ns_entry->a_qp.submitted_cmd_counter;
		} else {
			counter = &ns_entry->io_qp.submitted_cmd_counter;
		}

		if (*counter >= g_cmd_array_size) {
			g_run = false;
		}
	} else {
		if (current_ticks > g_runtime_ticks) {
			g_run = false;
		}
	}

	if (ns_entry->a_qp.timeout_tsc < current_ticks && !ns_entry->a_qp.timed_out &&
	    ns_entry->a_qp.num_cmds_outstanding > 0) {
		handle_timeout(&ns_entry->a_qp, true);
	}

	if (ns_entry->io_qp.timeout_tsc < current_ticks && !ns_entry->io_qp.timed_out &&
	    ns_entry->io_qp.num_cmds_outstanding > 0) {
		handle_timeout(&ns_entry->io_qp, false);
	}

	submit_ns_cmds(ns_entry);

	if (g_run) {
		return 0;
	}
	/*
	 * We either processed all I/O properly and can shut down normally, or we
	 * had a qp time out and we need to exit without reducing the values to 0.
	 */
	if (ns_entry->io_qp.num_cmds_outstanding == 0 &&
	    ns_entry->a_qp.num_cmds_outstanding == 0) {
		goto exit_handler;
	} else if (ns_entry->io_qp.timed_out && (!g_run_admin_commands || ns_entry->a_qp.timed_out)) {
		goto exit_handler;
	} else {
		return 0;
	}

exit_handler:
	spdk_poller_unregister(&ns_entry->req_poller);
	__sync_sub_and_fetch(&g_num_active_threads, 1);
	spdk_thread_exit(ns_entry->thread);
	return 0;
}

static void
prep_nvme_cmd(struct nvme_fuzz_ns *ns_entry, struct nvme_fuzz_qp *qp, struct nvme_fuzz_request *ctx)
{
	if (g_cmd_array) {
		memcpy(&ctx->cmd, &g_cmd_array[qp->submitted_cmd_counter], sizeof(ctx->cmd));
	} else {
		fuzz_fill_random_bytes((char *)&ctx->cmd, sizeof(ctx->cmd), &qp->random_seed);

		if (g_valid_ns_only) {
			ctx->cmd.nsid = ns_entry->nsid;
		}
	}
}

static int
submit_qp_cmds(struct nvme_fuzz_ns *ns, struct nvme_fuzz_qp *qp)
{
	struct nvme_fuzz_request *ctx;
	int rc;

	if (qp->timed_out) {
		return 0;
	}
	/* If we are reading from an array, we need to stop after the last one. */
	while ((qp->submitted_cmd_counter < g_cmd_array_size || g_cmd_array_size == 0) &&
	       !TAILQ_EMPTY(&qp->free_ctx_objs)) {
		ctx = TAILQ_FIRST(&qp->free_ctx_objs);
		do {
			prep_nvme_cmd(ns, qp, ctx);
		} while (qp->is_admin && ctx->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);

		TAILQ_REMOVE(&qp->free_ctx_objs, ctx, link);
		TAILQ_INSERT_HEAD(&qp->outstanding_ctx_objs, ctx, link);
		qp->num_cmds_outstanding++;
		qp->submitted_cmd_counter++;
		if (qp->is_admin) {
			rc = spdk_nvme_ctrlr_cmd_admin_raw(ns->ctrlr, &ctx->cmd, NULL, 0, nvme_fuzz_cpl_cb, ctx);
		} else {
			rc = spdk_nvme_ctrlr_cmd_io_raw(ns->ctrlr, qp->qpair, &ctx->cmd, NULL, 0, nvme_fuzz_cpl_cb, ctx);
		}
		if (rc) {
			return rc;
		}
	}
	return 0;
}

static void
submit_ns_cmds(struct nvme_fuzz_ns *ns_entry)
{
	int rc;

	if (!g_run) {
		return;
	}

	if (g_run_admin_commands) {
		rc = submit_qp_cmds(ns_entry, &ns_entry->a_qp);
		if (rc) {
			goto err_exit;
		}
	}

	if (g_cmd_array == NULL || !g_run_admin_commands) {
		rc = submit_qp_cmds(ns_entry, &ns_entry->io_qp);
	}
err_exit:
	if (rc) {
		/*
		 * I see the prospect of having a broken qpair on one ns as interesting
		 * enough to recommend stopping the application.
		 */
503 */ 504 fprintf(stderr, "Unable to submit command with rc %d\n", rc); 505 g_run = false; 506 } 507 } 508 509 static void 510 free_namespaces(void) 511 { 512 struct nvme_fuzz_ns *ns, *tmp; 513 514 TAILQ_FOREACH_SAFE(ns, &g_ns_list, tailq, tmp) { 515 printf("NS: %p I/O qp, Total commands completed: %lu, total successful commands: %lu, random_seed: %u\n", 516 ns->ns, 517 ns->io_qp.completed_cmd_counter, ns->io_qp.successful_completed_cmd_counter, ns->io_qp.random_seed); 518 printf("NS: %p admin qp, Total commands completed: %lu, total successful commands: %lu, random_seed: %u\n", 519 ns->ns, 520 ns->a_qp.completed_cmd_counter, ns->a_qp.successful_completed_cmd_counter, ns->a_qp.random_seed); 521 522 TAILQ_REMOVE(&g_ns_list, ns, tailq); 523 if (ns->io_qp.qpair) { 524 spdk_nvme_ctrlr_free_io_qpair(ns->io_qp.qpair); 525 } 526 if (ns->io_qp.req_ctx) { 527 free(ns->io_qp.req_ctx); 528 } 529 if (ns->a_qp.req_ctx) { 530 free(ns->a_qp.req_ctx); 531 } 532 free(ns); 533 } 534 } 535 536 static void 537 free_controllers(void) 538 { 539 struct nvme_fuzz_ctrlr *ctrlr, *tmp; 540 541 TAILQ_FOREACH_SAFE(ctrlr, &g_ctrlr_list, tailq, tmp) { 542 TAILQ_REMOVE(&g_ctrlr_list, ctrlr, tailq); 543 spdk_nvme_detach(ctrlr->ctrlr); 544 free(ctrlr); 545 } 546 } 547 548 static void 549 free_trids(void) 550 { 551 struct nvme_fuzz_trid *trid, *tmp; 552 553 TAILQ_FOREACH_SAFE(trid, &g_trid_list, tailq, tmp) { 554 TAILQ_REMOVE(&g_trid_list, trid, tailq); 555 free(trid); 556 } 557 } 558 559 static void 560 register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns, uint32_t nsid) 561 { 562 struct nvme_fuzz_ns *ns_entry; 563 564 ns_entry = calloc(1, sizeof(struct nvme_fuzz_ns)); 565 if (ns_entry == NULL) { 566 fprintf(stderr, "Unable to allocate an entry for a namespace\n"); 567 return; 568 } 569 570 ns_entry->ns = ns; 571 ns_entry->ctrlr = ctrlr; 572 ns_entry->nsid = nsid; 573 574 TAILQ_INIT(&ns_entry->io_qp.free_ctx_objs); 575 TAILQ_INIT(&ns_entry->io_qp.outstanding_ctx_objs); 576 if (g_run_admin_commands) { 577 ns_entry->a_qp.qpair = NULL; 578 TAILQ_INIT(&ns_entry->a_qp.free_ctx_objs); 579 TAILQ_INIT(&ns_entry->a_qp.outstanding_ctx_objs); 580 } 581 TAILQ_INSERT_TAIL(&g_ns_list, ns_entry, tailq); 582 } 583 584 static void 585 register_ctrlr(struct spdk_nvme_ctrlr *ctrlr) 586 { 587 struct nvme_fuzz_ctrlr *ctrlr_entry; 588 uint32_t nsid; 589 struct spdk_nvme_ns *ns; 590 591 ctrlr_entry = calloc(1, sizeof(struct nvme_fuzz_ctrlr)); 592 if (ctrlr_entry == NULL) { 593 fprintf(stderr, "Unable to allocate an entry for a controller\n"); 594 return; 595 } 596 597 ctrlr_entry->ctrlr = ctrlr; 598 TAILQ_INSERT_TAIL(&g_ctrlr_list, ctrlr_entry, tailq); 599 600 for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr); nsid != 0; 601 nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) { 602 ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid); 603 if (ns == NULL) { 604 continue; 605 } 606 register_ns(ctrlr, ns, nsid); 607 } 608 } 609 610 static void 611 attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid, 612 struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts) 613 { 614 register_ctrlr(ctrlr); 615 } 616 617 static bool 618 probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid, struct spdk_nvme_ctrlr_opts *opts) 619 { 620 printf("Controller trtype %s\ttraddr %s\n", spdk_nvme_transport_id_trtype_str(trid->trtype), 621 trid->traddr); 622 623 return true; 624 } 625 626 static int 627 prep_qpair(struct nvme_fuzz_ns *ns, struct nvme_fuzz_qp *qp, uint32_t max_qdepth) 628 { 629 uint32_t i; 630 631 /* ensure 
	if (g_seed_value != 0) {
		qp->random_seed = g_seed_value;
	} else {
		/* Take the low 32 bits of spdk_get_ticks. This should be more granular than time(). */
		qp->random_seed = spdk_get_ticks();
	}

	qp->timeout_tsc = fuzz_refresh_timeout();

	qp->req_ctx = calloc(max_qdepth, sizeof(struct nvme_fuzz_request));
	if (qp->req_ctx == NULL) {
		fprintf(stderr, "Unable to allocate I/O contexts for I/O qpair.\n");
		return -1;
	}

	for (i = 0; i < max_qdepth; i++) {
		qp->req_ctx[i].qp = qp;
		TAILQ_INSERT_HEAD(&qp->free_ctx_objs, &qp->req_ctx[i], link);
	}

	return 0;
}

static int
prepare_qpairs(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct nvme_fuzz_ns *ns_entry;

	TAILQ_FOREACH(ns_entry, &g_ns_list, tailq) {
		spdk_nvme_ctrlr_get_default_io_qpair_opts(ns_entry->ctrlr, &opts, sizeof(opts));
		ns_entry->io_qp.qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_entry->ctrlr, &opts, sizeof(opts));
		if (ns_entry->io_qp.qpair == NULL) {
			fprintf(stderr, "Unable to create a qpair for a namespace\n");
			return -1;
		}

		ns_entry->io_qp.is_admin = false;
		if (prep_qpair(ns_entry, &ns_entry->io_qp, g_io_depth) != 0) {
			fprintf(stderr, "Unable to allocate request contexts for I/O qpair.\n");
			return -1;
		}

		if (g_run_admin_commands) {
			ns_entry->a_qp.is_admin = true;
			if (prep_qpair(ns_entry, &ns_entry->a_qp, g_admin_depth) != 0) {
				fprintf(stderr, "Unable to allocate request contexts for admin qpair.\n");
				return -1;
			}
		}
	}
	return 0;
}

static void
start_ns_poller(void *ctx)
{
	struct nvme_fuzz_ns *ns_entry = ctx;

	ns_entry->req_poller = SPDK_POLLER_REGISTER(poll_for_completions, ns_entry, 0);
	submit_ns_cmds(ns_entry);
}

static int
check_app_completion(void *ctx)
{
	if (g_num_active_threads <= 0) {
		spdk_poller_unregister(&g_app_completion_poller);
		if (g_cmd_array) {
			free(g_cmd_array);
		}
		printf("Fuzzing completed. Shutting down the fuzz application\n\n");
		printf("Dumping successful admin opcodes:\n");
		report_successful_opcodes(g_successful_admin_opcodes, UNIQUE_OPCODES);
		printf("Dumping successful io opcodes:\n");
		report_successful_opcodes(g_successful_io_opcodes, UNIQUE_OPCODES);
		free_namespaces();
		free_controllers();
		free_trids();
		spdk_app_stop(0);
	}
	return 0;
}

static void
begin_fuzz(void *ctx)
{
	struct nvme_fuzz_ns *ns_entry;
	struct nvme_fuzz_trid *trid;
	int rc;

	if (!spdk_iommu_is_enabled()) {
		/* Don't set rc to an error code here. We don't want to fail an automated test based on this. */
		fprintf(stderr, "The IOMMU must be enabled to run this program to avoid unsafe memory accesses.\n");
		rc = 0;
		goto out;
	}

	TAILQ_FOREACH(trid, &g_trid_list, tailq) {
		if (spdk_nvme_probe(&trid->trid, trid, probe_cb, attach_cb, NULL) != 0) {
			fprintf(stderr, "spdk_nvme_probe() failed for transport address '%s'\n",
				trid->trid.traddr);
			rc = -1;
			goto out;
		}
	}

	if (TAILQ_EMPTY(&g_ns_list)) {
		fprintf(stderr, "No valid NVMe Namespaces to fuzz\n");
		rc = -EINVAL;
		goto out;
	}

	rc = prepare_qpairs();

	if (rc < 0) {
		fprintf(stderr, "Unable to prepare the qpairs\n");
		goto out;
	}

	g_runtime_ticks = spdk_get_ticks() + g_runtime * spdk_get_ticks_hz();

	/* Assigning all of the threads and then starting them makes cleanup easier. */
	TAILQ_FOREACH(ns_entry, &g_ns_list, tailq) {
		ns_entry->thread = spdk_thread_create(NULL, NULL);
		if (ns_entry->thread == NULL) {
			fprintf(stderr, "Failed to allocate thread for namespace.\n");
			goto out;
		}
	}

	TAILQ_FOREACH(ns_entry, &g_ns_list, tailq) {
		spdk_thread_send_msg(ns_entry->thread, start_ns_poller, ns_entry);
		__sync_add_and_fetch(&g_num_active_threads, 1);
	}

	g_app_completion_poller = SPDK_POLLER_REGISTER(check_app_completion, NULL, 1000000);
	return;
out:
	printf("Shutting down the fuzz application\n");
	free_namespaces();
	free_controllers();
	free_trids();
	spdk_app_stop(rc);
}

static int
parse_trids(void)
{
	struct spdk_conf *config = NULL;
	struct spdk_conf_section *sp;
	const char *trid_char;
	struct nvme_fuzz_trid *current_trid;
	int num_subsystems = 0;
	int rc = 0;

	if (g_conf_file) {
		config = spdk_conf_allocate();
		if (!config) {
			fprintf(stderr, "Unable to allocate an spdk_conf object\n");
			return -1;
		}

		rc = spdk_conf_read(config, g_conf_file);
		if (rc) {
			fprintf(stderr, "Unable to convert the conf file into a readable system\n");
			rc = -1;
			goto exit;
		}

		sp = spdk_conf_find_section(config, "Nvme");
		if (sp == NULL) {
			fprintf(stderr, "No Nvme configuration in conf file\n");
			goto exit;
		}

		while ((trid_char = spdk_conf_section_get_nmval(sp, "TransportID", num_subsystems, 0)) != NULL) {
			current_trid = malloc(sizeof(struct nvme_fuzz_trid));
			if (!current_trid) {
				fprintf(stderr, "Unable to allocate memory for transport ID\n");
				rc = -1;
				goto exit;
			}
			rc = spdk_nvme_transport_id_parse(&current_trid->trid, trid_char);
			if (rc < 0) {
				fprintf(stderr, "failed to parse transport ID: %s\n", trid_char);
				free(current_trid);
				rc = -1;
				goto exit;
			}
			TAILQ_INSERT_TAIL(&g_trid_list, current_trid, tailq);
			num_subsystems++;
		}
	}

exit:
	if (config != NULL) {
		spdk_conf_free(config);
	}
	return rc;
}

static void
nvme_fuzz_usage(void)
{
	fprintf(stderr,
		" -a                        Perform admin commands. If -j is specified, only admin commands will run. "
		"Otherwise they will be run in tandem with I/O commands.\n");
	fprintf(stderr, " -C <path>                 Path to a configuration file.\n");
	fprintf(stderr,
		" -j <path>                 Path to a json file containing named objects of type spdk_nvme_cmd. If this option is specified, -t will be ignored.\n");
	fprintf(stderr,
		" -N                        Target only valid namespaces with commands. "
		"This helps dig deeper into other errors besides invalid namespace.\n");
	fprintf(stderr, " -S <integer>              Seed value for test.\n");
	fprintf(stderr,
		" -t <integer>              Time in seconds to run the fuzz test. Only valid if -j is not specified.\n");
	fprintf(stderr, " -V                        Enable logging of each submitted command.\n");
}

static int
nvme_fuzz_parse(int ch, char *arg)
{
	int64_t error_test;

	switch (ch) {
	case 'a':
		g_run_admin_commands = true;
		break;
	case 'C':
		g_conf_file = optarg;
		break;
	case 'j':
		g_json_file = optarg;
		break;
	case 'N':
		g_valid_ns_only = true;
		break;
	case 'S':
		error_test = spdk_strtol(arg, 10);
		if (error_test < 0) {
			fprintf(stderr, "Invalid value supplied for the random seed.\n");
			return -1;
		} else {
			g_seed_value = error_test;
		}
		break;
	case 't':
		g_runtime = spdk_strtol(optarg, 10);
		if (g_runtime < 0 || g_runtime > MAX_RUNTIME_S) {
			fprintf(stderr, "You must supply a positive runtime value less than 86401.\n");
			return -1;
		}
		break;
	case 'V':
		g_verbose_mode = true;
		break;
	case '?':
	default:
		return -EINVAL;
	}
	return 0;
}

int
main(int argc, char **argv)
{
	struct spdk_app_opts opts = {};
	int rc;

	spdk_app_opts_init(&opts);
	opts.name = "nvme_fuzz";

	g_runtime = DEFAULT_RUNTIME;
	g_run = true;

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "aC:j:NS:t:V", NULL, nvme_fuzz_parse,
				      nvme_fuzz_usage)) != SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	if (g_conf_file) {
		parse_trids();
	}

	if (g_json_file != NULL) {
		g_cmd_array_size = fuzz_parse_args_into_array(g_json_file, (void **)&g_cmd_array,
				   sizeof(struct spdk_nvme_cmd), g_nvme_cmd_json_name, parse_nvme_cmd_obj);
		if (g_cmd_array_size == 0) {
			fprintf(stderr, "The provided json file did not contain any valid commands. Exiting.\n");
			return -EINVAL;
		}
	}

	rc = spdk_app_start(&opts, begin_fuzz, NULL);

	return rc;
}