/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/conf.h"
#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/nvme_spec.h"
#include "spdk/nvme.h"
#include "spdk/likely.h"
#include "spdk/json.h"
#include "fuzz_common.h"

#define UNIQUE_OPCODES 256

const char g_nvme_cmd_json_name[] = "struct spdk_nvme_cmd";
char *g_json_file = NULL;
uint64_t g_runtime_ticks;
unsigned int g_seed_value = 0;
int g_runtime;

int g_num_active_threads = 0;
uint32_t g_admin_depth = 16;
uint32_t g_io_depth = 128;
bool g_check_iommu = true;

bool g_valid_ns_only = false;
bool g_verbose_mode = false;
bool g_run_admin_commands = false;
bool g_run;

struct spdk_poller *g_app_completion_poller;
bool g_successful_io_opcodes[UNIQUE_OPCODES] = {0};
bool g_successful_admin_opcodes[UNIQUE_OPCODES] = {0};

struct spdk_nvme_cmd *g_cmd_array;
size_t g_cmd_array_size;

/* Context objects are required here to keep track of all I/O commands that are in flight. */
struct nvme_fuzz_request {
	struct spdk_nvme_cmd cmd;
	struct nvme_fuzz_qp *qp;
	TAILQ_ENTRY(nvme_fuzz_request) link;
};

struct nvme_fuzz_trid {
	struct spdk_nvme_transport_id trid;
	TAILQ_ENTRY(nvme_fuzz_trid) tailq;
};

struct nvme_fuzz_ctrlr {
	struct spdk_nvme_ctrlr *ctrlr;
	TAILQ_ENTRY(nvme_fuzz_ctrlr) tailq;
};

struct nvme_fuzz_qp {
	struct spdk_nvme_qpair *qpair;
	/* array of context objects equal in length to the queue depth */
	struct nvme_fuzz_request *req_ctx;
	TAILQ_HEAD(, nvme_fuzz_request) free_ctx_objs;
	TAILQ_HEAD(, nvme_fuzz_request) outstanding_ctx_objs;
	unsigned int random_seed;
	uint64_t completed_cmd_counter;
	uint64_t submitted_cmd_counter;
	uint64_t successful_completed_cmd_counter;
	uint64_t timeout_tsc;
	uint32_t num_cmds_outstanding;
	bool timed_out;
	bool is_admin;
};

struct nvme_fuzz_ns {
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_thread *thread;
	struct spdk_poller *req_poller;
	struct nvme_fuzz_qp io_qp;
	struct nvme_fuzz_qp a_qp;
	uint32_t nsid;
	TAILQ_ENTRY(nvme_fuzz_ns) tailq;
};

static TAILQ_HEAD(, nvme_fuzz_ns) g_ns_list = TAILQ_HEAD_INITIALIZER(g_ns_list);
static TAILQ_HEAD(, nvme_fuzz_ctrlr) g_ctrlr_list = TAILQ_HEAD_INITIALIZER(g_ctrlr_list);
static TAILQ_HEAD(, nvme_fuzz_trid) g_trid_list = TAILQ_HEAD_INITIALIZER(g_trid_list);

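/*
 * Callback supplied to fuzz_parse_args_into_array(). It walks the flat array
 * of JSON tokens for one "struct spdk_nvme_cmd" object, matching each NAME
 * token against an NVMe SQE field and range-checking the value that follows
 * it. Returns true if every recognized field parsed cleanly, false otherwise.
 */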
static bool
parse_nvme_cmd_obj(void *item, struct spdk_json_val *value, size_t num_values)
{
	struct spdk_nvme_cmd *cmd = item;
	struct spdk_json_val *next_val;
	uint64_t tmp_val;
	size_t i = 0;

	while (i < num_values) {
		if (value->type == SPDK_JSON_VAL_NAME) {
			next_val = value + 1;
			if (!strncmp(value->start, "opc", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UNSIGNED_8BIT_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->opc = tmp_val;
				}
			} else if (!strncmp(value->start, "fuse", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UNSIGNED_2BIT_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->fuse = tmp_val;
				}
			} else if (!strncmp(value->start, "rsvd1", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UNSIGNED_4BIT_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->rsvd1 = tmp_val;
				}
			} else if (!strncmp(value->start, "psdt", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UNSIGNED_2BIT_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->psdt = tmp_val;
				}
			} else if (!strncmp(value->start, "cid", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT16_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->cid = tmp_val;
				}
			} else if (!strncmp(value->start, "nsid", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->nsid = tmp_val;
				}
			} else if (!strncmp(value->start, "rsvd2", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->rsvd2 = tmp_val;
				}
			} else if (!strncmp(value->start, "rsvd3", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->rsvd3 = tmp_val;
				}
			} else if (!strncmp(value->start, "mptr", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT64_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->mptr = tmp_val;
				}
			} else if (!strncmp(value->start, "dptr", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_STRING) {
					if (fuzz_get_base_64_buffer_value(&cmd->dptr, sizeof(cmd->dptr), (char *)next_val->start,
									  next_val->len)) {
						goto invalid;
					}
				}
			} else if (!strncmp(value->start, "cdw10", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->cdw10 = tmp_val;
				}
			} else if (!strncmp(value->start, "cdw11", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->cdw11 = tmp_val;
				}
			} else if (!strncmp(value->start, "cdw12", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->cdw12 = tmp_val;
				}
			} else if (!strncmp(value->start, "cdw13", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->cdw13 = tmp_val;
				}
			} else if (!strncmp(value->start, "cdw14", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->cdw14 = tmp_val;
				}
			} else if (!strncmp(value->start, "cdw15", value->len)) {
				if (next_val->type == SPDK_JSON_VAL_NUMBER) {
					if (fuzz_parse_json_num(next_val, UINT32_MAX, &tmp_val)) {
						goto invalid;
					}
					cmd->cdw15 = tmp_val;
				}
			}
		}
		i++;
		value++;
	}
	return true;

invalid:
	fprintf(stderr, "Invalid value supplied for cmd->%.*s: %.*s\n", value->len, (char *)value->start,
		next_val->len, (char *)next_val->start);
	return false;
}

static void
report_successful_opcodes(bool *array, int length)
{
	int i;

	for (i = 0; i < length; i++) {
		if (array[i] == true) {
			printf("%d, ", i);
		}
	}
	printf("\n");
}

static int
print_nvme_cmd(void *cb_ctx, const void *data, size_t size)
{
	fprintf(stderr, "%s\n", (const char *)data);
	return 0;
}

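/*
 * Serializes one command as formatted JSON to stderr (via print_nvme_cmd),
 * base-64 encoding the dptr field so the output can be replayed later
 * through the -j option.
 */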
static void
json_dump_nvme_cmd(struct spdk_nvme_cmd *cmd)
{
	struct spdk_json_write_ctx *w;
	char *dptr_value;

	dptr_value = fuzz_get_value_base_64_buffer(&cmd->dptr, sizeof(cmd->dptr));
	if (dptr_value == NULL) {
		fprintf(stderr, "Unable to allocate buffer context for printing command.\n");
		return;
	}

	w = spdk_json_write_begin(print_nvme_cmd, cmd, SPDK_JSON_WRITE_FLAG_FORMATTED);
	if (w == NULL) {
		fprintf(stderr, "Unable to allocate json context for printing command.\n");
		free(dptr_value);
		return;
	}

	spdk_json_write_named_object_begin(w, g_nvme_cmd_json_name);
	spdk_json_write_named_uint32(w, "opc", cmd->opc);
	spdk_json_write_named_uint32(w, "fuse", cmd->fuse);
	spdk_json_write_named_uint32(w, "rsvd1", cmd->rsvd1);
	spdk_json_write_named_uint32(w, "psdt", cmd->psdt);
	spdk_json_write_named_uint32(w, "cid", cmd->cid);
	spdk_json_write_named_uint32(w, "nsid", cmd->nsid);
	spdk_json_write_named_uint32(w, "rsvd2", cmd->rsvd2);
	spdk_json_write_named_uint32(w, "rsvd3", cmd->rsvd3);
	spdk_json_write_named_uint32(w, "mptr", cmd->mptr);
	spdk_json_write_named_string(w, "dptr", dptr_value);
	spdk_json_write_named_uint32(w, "cdw10", cmd->cdw10);
	spdk_json_write_named_uint32(w, "cdw11", cmd->cdw11);
	spdk_json_write_named_uint32(w, "cdw12", cmd->cdw12);
	spdk_json_write_named_uint32(w, "cdw13", cmd->cdw13);
	spdk_json_write_named_uint32(w, "cdw14", cmd->cdw14);
	spdk_json_write_named_uint32(w, "cdw15", cmd->cdw15);
	spdk_json_write_object_end(w);

	free(dptr_value);
	spdk_json_write_end(w);
}

static void
json_dump_nvme_cmd_list(struct nvme_fuzz_qp *qp)
{
	struct nvme_fuzz_request *ctx;

	TAILQ_FOREACH(ctx, &qp->outstanding_ctx_objs, link) {
		json_dump_nvme_cmd(&ctx->cmd);
	}
}

static void
handle_timeout(struct nvme_fuzz_qp *qp, bool is_admin)
{
	fprintf(stderr, "An %s queue has timed out. Dumping all outstanding commands from that queue\n",
		is_admin ? "Admin" : "I/O");
	json_dump_nvme_cmd_list(qp);
	qp->timed_out = true;
}

static void submit_ns_cmds(struct nvme_fuzz_ns *ns_entry);

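/*
 * Completion callback for every fuzzed command. A successful completion is
 * the interesting case here: a random command that the controller accepted
 * is dumped to stderr and its opcode recorded in the per-queue-type table.
 * Failed completions are only dumped in verbose (-V) mode.
 */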
static void
nvme_fuzz_cpl_cb(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_fuzz_request *ctx = cb_arg;
	struct nvme_fuzz_qp *qp = ctx->qp;

	qp->completed_cmd_counter++;
	if (spdk_unlikely(cpl->status.sc == SPDK_NVME_SC_SUCCESS)) {
		fprintf(stderr, "The following %s command (command num %" PRIu64 ") completed successfully\n",
			qp->is_admin ? "Admin" : "I/O", qp->completed_cmd_counter);
		qp->successful_completed_cmd_counter++;
		json_dump_nvme_cmd(&ctx->cmd);

		if (qp->is_admin) {
			__sync_bool_compare_and_swap(&g_successful_admin_opcodes[ctx->cmd.opc], false, true);
		} else {
			__sync_bool_compare_and_swap(&g_successful_io_opcodes[ctx->cmd.opc], false, true);
		}
	} else if (g_verbose_mode == true) {
		fprintf(stderr, "The following %s command (command num %" PRIu64 ") failed as expected.\n",
			qp->is_admin ? "Admin" : "I/O", qp->completed_cmd_counter);
		json_dump_nvme_cmd(&ctx->cmd);
	}

	qp->timeout_tsc = fuzz_refresh_timeout();
	TAILQ_REMOVE(&qp->outstanding_ctx_objs, ctx, link);
	TAILQ_INSERT_HEAD(&qp->free_ctx_objs, ctx, link);
	assert(qp->num_cmds_outstanding > 0);
	qp->num_cmds_outstanding--;
}

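/*
 * Per-namespace poller. Reaps completions, checks the command-count or
 * time-based exit condition, flags queues whose oldest command has exceeded
 * the timeout, and resubmits. It unregisters itself and exits its thread
 * once g_run has been cleared and the queues are drained or timed out.
 */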
static int
poll_for_completions(void *arg)
{
	struct nvme_fuzz_ns *ns_entry = arg;
	uint64_t current_ticks = spdk_get_ticks();
	uint64_t *counter;

	if (!ns_entry->io_qp.timed_out) {
		spdk_nvme_qpair_process_completions(ns_entry->io_qp.qpair, 0);
		/* Always have to process admin completions for the purposes of keep alive. */
		spdk_nvme_ctrlr_process_admin_completions(ns_entry->ctrlr);
	}

	if (g_cmd_array) {
		if (g_run_admin_commands) {
			counter = &ns_entry->a_qp.submitted_cmd_counter;
		} else {
			counter = &ns_entry->io_qp.submitted_cmd_counter;
		}

		if (*counter >= g_cmd_array_size) {
			g_run = false;
		}
	} else {
		if (current_ticks > g_runtime_ticks) {
			g_run = false;
		}
	}

	if (ns_entry->a_qp.timeout_tsc < current_ticks && !ns_entry->a_qp.timed_out &&
	    ns_entry->a_qp.num_cmds_outstanding > 0) {
		handle_timeout(&ns_entry->a_qp, true);
	}

	if (ns_entry->io_qp.timeout_tsc < current_ticks && !ns_entry->io_qp.timed_out &&
	    ns_entry->io_qp.num_cmds_outstanding > 0) {
		handle_timeout(&ns_entry->io_qp, false);
	}

	submit_ns_cmds(ns_entry);

	if (g_run) {
		return 0;
	}
	/*
	 * We either processed all I/O properly and can shut down normally, or we
	 * had a qp time out and we need to exit without reducing the values to 0.
	 */
	if (ns_entry->io_qp.num_cmds_outstanding == 0 &&
	    ns_entry->a_qp.num_cmds_outstanding == 0) {
		goto exit_handler;
	} else if (ns_entry->io_qp.timed_out && (!g_run_admin_commands || ns_entry->a_qp.timed_out)) {
		goto exit_handler;
	} else {
		return 0;
	}

exit_handler:
	spdk_poller_unregister(&ns_entry->req_poller);
	__sync_sub_and_fetch(&g_num_active_threads, 1);
	spdk_thread_exit(ns_entry->thread);
	return 0;
}

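/*
 * Builds the next command to submit: either the next entry from the JSON
 * replay array or a fully random SQE (optionally pinned to a valid nsid
 * when -N is given).
 */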
static void
prep_nvme_cmd(struct nvme_fuzz_ns *ns_entry, struct nvme_fuzz_qp *qp, struct nvme_fuzz_request *ctx)
{
	if (g_cmd_array) {
		memcpy(&ctx->cmd, &g_cmd_array[qp->submitted_cmd_counter], sizeof(ctx->cmd));
	} else {
		fuzz_fill_random_bytes((char *)&ctx->cmd, sizeof(ctx->cmd), &qp->random_seed);

		if (g_valid_ns_only) {
			ctx->cmd.nsid = ns_entry->nsid;
		}
	}

	/* The fuzz test does not support sequential FUSE commands. */
	ctx->cmd.fuse = 0;
}

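/*
 * Keeps the given queue saturated: pulls free context objects and submits
 * raw commands until the queue depth is reached or the replay array is
 * exhausted. Async Event Requests are re-rolled on the admin queue because
 * they may never complete and would pin their context objects indefinitely.
 */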
static int
submit_qp_cmds(struct nvme_fuzz_ns *ns, struct nvme_fuzz_qp *qp)
{
	struct nvme_fuzz_request *ctx;
	int rc;

	if (qp->timed_out) {
		return 0;
	}
	/* If we are reading from an array, we need to stop after the last one. */
	while ((qp->submitted_cmd_counter < g_cmd_array_size || g_cmd_array_size == 0) &&
	       !TAILQ_EMPTY(&qp->free_ctx_objs)) {
		ctx = TAILQ_FIRST(&qp->free_ctx_objs);
		do {
			prep_nvme_cmd(ns, qp, ctx);
		} while (qp->is_admin && ctx->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);

		TAILQ_REMOVE(&qp->free_ctx_objs, ctx, link);
		TAILQ_INSERT_HEAD(&qp->outstanding_ctx_objs, ctx, link);
		qp->num_cmds_outstanding++;
		qp->submitted_cmd_counter++;
		if (qp->is_admin) {
			rc = spdk_nvme_ctrlr_cmd_admin_raw(ns->ctrlr, &ctx->cmd, NULL, 0, nvme_fuzz_cpl_cb, ctx);
		} else {
			rc = spdk_nvme_ctrlr_cmd_io_raw(ns->ctrlr, qp->qpair, &ctx->cmd, NULL, 0, nvme_fuzz_cpl_cb, ctx);
		}
		if (rc) {
			return rc;
		}
	}
	return 0;
}

static void
submit_ns_cmds(struct nvme_fuzz_ns *ns_entry)
{
	int rc;

	if (!g_run) {
		return;
	}

	if (g_run_admin_commands) {
		rc = submit_qp_cmds(ns_entry, &ns_entry->a_qp);
		if (rc) {
			goto err_exit;
		}
	}

	if (g_cmd_array == NULL || !g_run_admin_commands) {
		rc = submit_qp_cmds(ns_entry, &ns_entry->io_qp);
	}
err_exit:
	if (rc) {
		/*
		 * A broken qpair on one namespace is treated as serious enough
		 * to warrant stopping the entire application.
		 */
		fprintf(stderr, "Unable to submit command with rc %d\n", rc);
		g_run = false;
	}
}


static void
free_namespaces(void)
{
	struct nvme_fuzz_ns *ns, *tmp;

	TAILQ_FOREACH_SAFE(ns, &g_ns_list, tailq, tmp) {
		printf("NS: %p I/O qp, Total commands completed: %" PRIu64 ", total successful commands: %" PRIu64
		       ", random_seed: %u\n",
		       ns->ns,
		       ns->io_qp.completed_cmd_counter, ns->io_qp.successful_completed_cmd_counter, ns->io_qp.random_seed);
		printf("NS: %p admin qp, Total commands completed: %" PRIu64 ", total successful commands: %" PRIu64
		       ", random_seed: %u\n",
		       ns->ns,
		       ns->a_qp.completed_cmd_counter, ns->a_qp.successful_completed_cmd_counter, ns->a_qp.random_seed);

		TAILQ_REMOVE(&g_ns_list, ns, tailq);
		if (ns->io_qp.qpair) {
			spdk_nvme_ctrlr_free_io_qpair(ns->io_qp.qpair);
		}
		if (ns->io_qp.req_ctx) {
			free(ns->io_qp.req_ctx);
		}
		if (ns->a_qp.req_ctx) {
			free(ns->a_qp.req_ctx);
		}
		free(ns);
	}
}

static void
free_controllers(void)
{
	struct nvme_fuzz_ctrlr *ctrlr, *tmp;
	struct spdk_nvme_detach_ctx *detach_ctx = NULL;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ctrlr_list, tailq, tmp) {
		TAILQ_REMOVE(&g_ctrlr_list, ctrlr, tailq);
		spdk_nvme_detach_async(ctrlr->ctrlr, &detach_ctx);
		free(ctrlr);
	}

	if (detach_ctx) {
		spdk_nvme_detach_poll(detach_ctx);
	}
}

static void
free_trids(void)
{
	struct nvme_fuzz_trid *trid, *tmp;

	TAILQ_FOREACH_SAFE(trid, &g_trid_list, tailq, tmp) {
		TAILQ_REMOVE(&g_trid_list, trid, tailq);
		free(trid);
	}
}

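/*
 * register_ns() and register_ctrlr() populate the global controller and
 * namespace lists at connect time; the qpairs and request contexts for each
 * namespace entry are allocated later in prepare_qpairs().
 */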
static void
register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns, uint32_t nsid)
{
	struct nvme_fuzz_ns *ns_entry;

	ns_entry = calloc(1, sizeof(struct nvme_fuzz_ns));
	if (ns_entry == NULL) {
		fprintf(stderr, "Unable to allocate an entry for a namespace\n");
		return;
	}

	ns_entry->ns = ns;
	ns_entry->ctrlr = ctrlr;
	ns_entry->nsid = nsid;

	TAILQ_INIT(&ns_entry->io_qp.free_ctx_objs);
	TAILQ_INIT(&ns_entry->io_qp.outstanding_ctx_objs);
	TAILQ_INIT(&ns_entry->a_qp.free_ctx_objs);
	TAILQ_INIT(&ns_entry->a_qp.outstanding_ctx_objs);
	TAILQ_INSERT_TAIL(&g_ns_list, ns_entry, tailq);
}

static void
register_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_fuzz_ctrlr *ctrlr_entry;
	uint32_t nsid;
	struct spdk_nvme_ns *ns;

	ctrlr_entry = calloc(1, sizeof(struct nvme_fuzz_ctrlr));
	if (ctrlr_entry == NULL) {
		fprintf(stderr, "Unable to allocate an entry for a controller\n");
		return;
	}

	ctrlr_entry->ctrlr = ctrlr;
	TAILQ_INSERT_TAIL(&g_ctrlr_list, ctrlr_entry, tailq);

	for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr); nsid != 0;
	     nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
		ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
		if (ns == NULL) {
			continue;
		}
		register_ns(ctrlr, ns, nsid);
	}
}

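/*
 * Seeds the queue's RNG, arms its timeout, and places one request context
 * per slot of the requested queue depth on the free list.
 */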
static int
prep_qpair(struct nvme_fuzz_ns *ns, struct nvme_fuzz_qp *qp, uint32_t max_qdepth)
{
	uint32_t i;

	/* Ensure that each qpair gets a unique random seed for maximum command dispersion. */
	if (g_seed_value != 0) {
		qp->random_seed = g_seed_value;
	} else {
		/* Take the low 32 bits of spdk_get_ticks. This should be more granular than time(). */
		qp->random_seed = spdk_get_ticks();
	}

	qp->timeout_tsc = fuzz_refresh_timeout();

	qp->req_ctx = calloc(max_qdepth, sizeof(struct nvme_fuzz_request));
	if (qp->req_ctx == NULL) {
		fprintf(stderr, "Unable to allocate I/O contexts for I/O qpair.\n");
		return -1;
	}

	for (i = 0; i < max_qdepth; i++) {
		qp->req_ctx[i].qp = qp;
		TAILQ_INSERT_HEAD(&qp->free_ctx_objs, &qp->req_ctx[i], link);
	}

	return 0;
}

static int
prepare_qpairs(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct nvme_fuzz_ns *ns_entry;

	TAILQ_FOREACH(ns_entry, &g_ns_list, tailq) {
		spdk_nvme_ctrlr_get_default_io_qpair_opts(ns_entry->ctrlr, &opts, sizeof(opts));
		ns_entry->io_qp.qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_entry->ctrlr, &opts, sizeof(opts));
		if (ns_entry->io_qp.qpair == NULL) {
			fprintf(stderr, "Unable to create a qpair for a namespace\n");
			return -1;
		}

		ns_entry->io_qp.is_admin = false;
		if (prep_qpair(ns_entry, &ns_entry->io_qp, g_io_depth) != 0) {
			fprintf(stderr, "Unable to allocate request contexts for I/O qpair.\n");
			return -1;
		}

		if (g_run_admin_commands) {
			ns_entry->a_qp.is_admin = true;
			if (prep_qpair(ns_entry, &ns_entry->a_qp, g_admin_depth) != 0) {
				fprintf(stderr, "Unable to allocate request contexts for admin qpair.\n");
				return -1;
			}
		}
	}
	return 0;
}

static void
start_ns_poller(void *ctx)
{
	struct nvme_fuzz_ns *ns_entry = ctx;

	ns_entry->req_poller = SPDK_POLLER_REGISTER(poll_for_completions, ns_entry, 0);
	submit_ns_cmds(ns_entry);
}

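/*
 * App-level poller (runs once per second). When every namespace thread has
 * exited, it reports which opcodes ever completed successfully, tears down
 * all global state, and stops the application.
 */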
static int
check_app_completion(void *ctx)
{
	if (g_num_active_threads <= 0) {
		spdk_poller_unregister(&g_app_completion_poller);
		if (g_cmd_array) {
			free(g_cmd_array);
		}
		printf("Fuzzing completed. Shutting down the fuzz application\n\n");
		printf("Dumping successful admin opcodes:\n");
		report_successful_opcodes(g_successful_admin_opcodes, UNIQUE_OPCODES);
		printf("Dumping successful io opcodes:\n");
		report_successful_opcodes(g_successful_io_opcodes, UNIQUE_OPCODES);
		free_namespaces();
		free_controllers();
		free_trids();
		spdk_app_stop(0);
	}
	return 0;
}

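/*
 * spdk_app_start() entry point: connects to each requested target, sets up
 * the qpairs, and spawns one SPDK thread per namespace to drive the
 * fuzzing pollers.
 */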
static void
begin_fuzz(void *ctx)
{
	struct nvme_fuzz_ns *ns_entry;
	struct nvme_fuzz_trid *trid;
	struct spdk_nvme_ctrlr *ctrlr;
	int rc;

	if (g_check_iommu && !spdk_iommu_is_enabled()) {
		/* Don't set rc to an error code here. We don't want to fail an automated test based on this. */
		fprintf(stderr, "The IOMMU must be enabled to run this program to avoid unsafe memory accesses.\n");
		rc = 0;
		goto out;
	}

	TAILQ_FOREACH(trid, &g_trid_list, tailq) {
		ctrlr = spdk_nvme_connect(&trid->trid, NULL, 0);
		if (ctrlr == NULL) {
			fprintf(stderr, "spdk_nvme_connect() failed for transport address '%s'\n",
				trid->trid.traddr);
			rc = -1;
			goto out;
		}
		register_ctrlr(ctrlr);
	}

	if (TAILQ_EMPTY(&g_ns_list)) {
		fprintf(stderr, "No valid NVMe Namespaces to fuzz\n");
		rc = -EINVAL;
		goto out;
	}

	rc = prepare_qpairs();
	if (rc < 0) {
		fprintf(stderr, "Unable to prepare the qpairs\n");
		goto out;
	}

	g_runtime_ticks = spdk_get_ticks() + g_runtime * spdk_get_ticks_hz();

	/* Assigning all of the threads and then starting them makes cleanup easier. */
	TAILQ_FOREACH(ns_entry, &g_ns_list, tailq) {
		ns_entry->thread = spdk_thread_create(NULL, NULL);
		if (ns_entry->thread == NULL) {
			fprintf(stderr, "Failed to allocate thread for namespace.\n");
			rc = -1;
			goto out;
		}
	}

	TAILQ_FOREACH(ns_entry, &g_ns_list, tailq) {
		spdk_thread_send_msg(ns_entry->thread, start_ns_poller, ns_entry);
		__sync_add_and_fetch(&g_num_active_threads, 1);
	}

	g_app_completion_poller = SPDK_POLLER_REGISTER(check_app_completion, NULL, 1000000);
	return;
out:
	printf("Shutting down the fuzz application\n");
	free_namespaces();
	free_controllers();
	free_trids();
	spdk_app_stop(rc);
}

static void
nvme_fuzz_usage(void)
{
	fprintf(stderr, " -a           Perform admin commands. If -j is specified, \
only admin commands will run. Otherwise they will be run in tandem with I/O commands.\n");
	fprintf(stderr, " -F           Transport ID for subsystem that should be fuzzed.\n");
	fprintf(stderr,
		" -j <path>    Path to a JSON file containing named objects of type spdk_nvme_cmd. If this option is specified, -t will be ignored.\n");
	fprintf(stderr, " -N           Target only valid namespaces with commands. \
This helps dig deeper into other errors besides invalid namespace.\n");
	fprintf(stderr, " -S <integer> Seed value for the test.\n");
	fprintf(stderr,
		" -t <integer> Time in seconds to run the fuzz test. Only valid if -j is not specified.\n");
	fprintf(stderr, " -U           Do not check whether the IOMMU is enabled.\n");
	fprintf(stderr, " -V           Enable logging of each submitted command.\n");
}


static int
nvme_fuzz_parse(int ch, char *arg)
{
	struct nvme_fuzz_trid *trid;
	int64_t error_test;
	int rc;

	switch (ch) {
	case 'a':
		g_run_admin_commands = true;
		break;
	case 'F':
		trid = malloc(sizeof(*trid));
		if (!trid) {
			fprintf(stderr, "Unable to allocate memory for transport ID\n");
			return -1;
		}
		rc = spdk_nvme_transport_id_parse(&trid->trid, optarg);
		if (rc < 0) {
			fprintf(stderr, "failed to parse transport ID: %s\n", optarg);
			free(trid);
			return -1;
		}
		TAILQ_INSERT_TAIL(&g_trid_list, trid, tailq);
		break;
	case 'j':
		g_json_file = optarg;
		break;
	case 'N':
		g_valid_ns_only = true;
		break;
	case 'S':
		error_test = spdk_strtol(arg, 10);
		if (error_test < 0) {
			fprintf(stderr, "Invalid value supplied for the random seed.\n");
			return -1;
		} else {
			g_seed_value = error_test;
		}
		break;
	case 't':
		g_runtime = spdk_strtol(optarg, 10);
		if (g_runtime < 0 || g_runtime > MAX_RUNTIME_S) {
			fprintf(stderr, "You must supply a positive runtime value no greater than 86400 seconds.\n");
			return -1;
		}
		break;
	case 'U':
		g_check_iommu = false;
		break;
	case 'V':
		g_verbose_mode = true;
		break;
	case '?':
	default:
		return -EINVAL;
	}
	return 0;
}

int
main(int argc, char **argv)
{
	struct spdk_app_opts opts = {};
	int rc;

	spdk_app_opts_init(&opts, sizeof(opts));
	opts.name = "nvme_fuzz";
	opts.rpc_addr = NULL;

	g_runtime = DEFAULT_RUNTIME;
	g_run = true;

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "aF:j:NS:t:UV", NULL, nvme_fuzz_parse,
				      nvme_fuzz_usage)) != SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

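	/*
	 * Load the replay file, if one was given. A minimal sketch of one
	 * entry, inferred from the field names handled in parse_nvme_cmd_obj()
	 * (the exact file layout is defined by fuzz_parse_args_into_array()
	 * in fuzz_common, so treat this as an assumption, not a spec):
	 *
	 *   { "struct spdk_nvme_cmd": { "opc": 6, "nsid": 1, "cdw10": 0,
	 *     "dptr": "<base-64 encoded dptr bytes>" } }
	 */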
	if (g_json_file != NULL) {
		g_cmd_array_size = fuzz_parse_args_into_array(g_json_file, (void **)&g_cmd_array,
				   sizeof(struct spdk_nvme_cmd), g_nvme_cmd_json_name, parse_nvme_cmd_obj);
		if (g_cmd_array_size == 0) {
			fprintf(stderr, "The provided json file did not contain any valid commands. Exiting.\n");
			return -EINVAL;
		}
	}

	rc = spdk_app_start(&opts, begin_fuzz, NULL);

	spdk_app_fini();
	return rc;
}