1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "spdk/barrier.h" 37 #include "spdk/fd.h" 38 #include "spdk/nvme.h" 39 #include "spdk/env.h" 40 #include "spdk/string.h" 41 #include "spdk/nvme_intel.h" 42 #include "spdk/histogram_data.h" 43 #include "spdk/string.h" 44 #include "spdk/log.h" 45 46 #if HAVE_LIBAIO 47 #include <libaio.h> 48 #endif 49 50 struct ctrlr_entry { 51 struct spdk_nvme_ctrlr *ctrlr; 52 TAILQ_ENTRY(ctrlr_entry) link; 53 char name[1024]; 54 }; 55 56 enum entry_type { 57 ENTRY_TYPE_NVME_NS, 58 ENTRY_TYPE_AIO_FILE, 59 }; 60 61 struct ns_entry { 62 enum entry_type type; 63 64 union { 65 struct { 66 struct spdk_nvme_ctrlr *ctrlr; 67 struct spdk_nvme_ns *ns; 68 struct spdk_nvme_qpair *qpair; 69 } nvme; 70 #if HAVE_LIBAIO 71 struct { 72 int fd; 73 struct io_event *events; 74 io_context_t ctx; 75 } aio; 76 #endif 77 } u; 78 79 uint32_t io_size_blocks; 80 uint64_t size_in_ios; 81 bool is_draining; 82 uint32_t current_queue_depth; 83 char name[1024]; 84 struct ns_entry *next; 85 86 struct spdk_histogram_data *submit_histogram; 87 struct spdk_histogram_data *complete_histogram; 88 }; 89 90 struct perf_task { 91 void *buf; 92 uint64_t submit_tsc; 93 #if HAVE_LIBAIO 94 struct iocb iocb; 95 #endif 96 }; 97 98 static bool g_enable_histogram = false; 99 100 static TAILQ_HEAD(, ctrlr_entry) g_ctrlr = TAILQ_HEAD_INITIALIZER(g_ctrlr); 101 static struct ns_entry *g_ns = NULL; 102 103 static uint64_t g_tsc_rate; 104 105 static uint32_t g_io_size_bytes; 106 static int g_time_in_sec; 107 108 static int g_aio_optind; /* Index of first AIO filename in argv */ 109 110 struct perf_task *g_task; 111 uint64_t g_tsc_submit = 0; 112 uint64_t g_tsc_submit_min = UINT64_MAX; 113 uint64_t g_tsc_submit_max = 0; 114 uint64_t g_tsc_complete = 0; 115 uint64_t g_tsc_complete_min = UINT64_MAX; 116 uint64_t g_tsc_complete_max = 0; 117 uint64_t g_io_completed = 0; 118 119 static struct spdk_nvme_transport_id g_trid = {}; 120 121 static void 122 register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns) 123 { 124 struct ns_entry *entry; 125 const struct spdk_nvme_ctrlr_data *cdata; 126 127 cdata = spdk_nvme_ctrlr_get_data(ctrlr); 128 129 if (!spdk_nvme_ns_is_active(ns)) { 130 printf("Controller %-20.20s (%-20.20s): Skipping inactive NS %u\n", 131 cdata->mn, cdata->sn, 132 spdk_nvme_ns_get_id(ns)); 133 return; 134 } 135 136 if (spdk_nvme_ns_get_size(ns) < g_io_size_bytes || 137 spdk_nvme_ns_get_sector_size(ns) > g_io_size_bytes) { 138 printf("WARNING: controller %-20.20s (%-20.20s) ns %u has invalid " 139 "ns size %" PRIu64 " / block size %u for I/O size %u\n", 140 cdata->mn, cdata->sn, spdk_nvme_ns_get_id(ns), 141 spdk_nvme_ns_get_size(ns), spdk_nvme_ns_get_sector_size(ns), g_io_size_bytes); 142 return; 143 } 144 145 entry = calloc(1, sizeof(struct ns_entry)); 146 if (entry == NULL) { 147 perror("ns_entry malloc"); 148 exit(1); 149 } 150 151 entry->type = ENTRY_TYPE_NVME_NS; 152 entry->u.nvme.ctrlr = ctrlr; 153 entry->u.nvme.ns = ns; 154 155 entry->size_in_ios = spdk_nvme_ns_get_size(ns) / 156 g_io_size_bytes; 157 entry->io_size_blocks = g_io_size_bytes / spdk_nvme_ns_get_sector_size(ns); 158 entry->submit_histogram = spdk_histogram_data_alloc(); 159 entry->complete_histogram = spdk_histogram_data_alloc(); 160 161 snprintf(entry->name, 44, "%-20.20s (%-20.20s)", cdata->mn, cdata->sn); 162 163 entry->next = g_ns; 164 g_ns = entry; 165 } 166 167 static void 168 register_ctrlr(struct spdk_nvme_ctrlr *ctrlr) 169 { 170 int num_ns; 171 struct ctrlr_entry *entry = malloc(sizeof(struct ctrlr_entry)); 172 const struct spdk_nvme_ctrlr_data *cdata = spdk_nvme_ctrlr_get_data(ctrlr); 173 174 if (entry == NULL) { 175 perror("ctrlr_entry malloc"); 176 exit(1); 177 } 178 179 snprintf(entry->name, sizeof(entry->name), "%-20.20s (%-20.20s)", cdata->mn, cdata->sn); 180 181 entry->ctrlr = ctrlr; 182 183 TAILQ_INSERT_TAIL(&g_ctrlr, entry, link); 184 185 num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr); 186 /* Only register the first namespace. */ 187 if (num_ns < 1) { 188 fprintf(stderr, "controller found with no namespaces\n"); 189 return; 190 } 191 192 register_ns(ctrlr, spdk_nvme_ctrlr_get_ns(ctrlr, 1)); 193 } 194 195 #if HAVE_LIBAIO 196 static int 197 register_aio_file(const char *path) 198 { 199 struct ns_entry *entry; 200 201 int fd; 202 uint64_t size; 203 uint32_t blklen; 204 205 fd = open(path, O_RDWR | O_DIRECT); 206 if (fd < 0) { 207 fprintf(stderr, "Could not open AIO device %s: %s\n", path, strerror(errno)); 208 return -1; 209 } 210 211 size = spdk_fd_get_size(fd); 212 if (size == 0) { 213 fprintf(stderr, "Could not determine size of AIO device %s\n", path); 214 close(fd); 215 return -1; 216 } 217 218 blklen = spdk_fd_get_blocklen(fd); 219 if (blklen == 0) { 220 fprintf(stderr, "Could not determine block size of AIO device %s\n", path); 221 close(fd); 222 return -1; 223 } 224 225 entry = calloc(1, sizeof(struct ns_entry)); 226 if (entry == NULL) { 227 close(fd); 228 perror("aio ns_entry malloc"); 229 return -1; 230 } 231 232 entry->type = ENTRY_TYPE_AIO_FILE; 233 entry->u.aio.fd = fd; 234 entry->size_in_ios = size / g_io_size_bytes; 235 entry->io_size_blocks = g_io_size_bytes / blklen; 236 entry->submit_histogram = spdk_histogram_data_alloc(); 237 entry->complete_histogram = spdk_histogram_data_alloc(); 238 239 snprintf(entry->name, sizeof(entry->name), "%s", path); 240 241 g_ns = entry; 242 243 return 0; 244 } 245 246 static int 247 aio_submit(io_context_t aio_ctx, struct iocb *iocb, int fd, enum io_iocb_cmd cmd, void *buf, 248 unsigned long nbytes, uint64_t offset, void *cb_ctx) 249 { 250 iocb->aio_fildes = fd; 251 iocb->aio_reqprio = 0; 252 iocb->aio_lio_opcode = cmd; 253 iocb->u.c.buf = buf; 254 iocb->u.c.nbytes = nbytes; 255 iocb->u.c.offset = offset; 256 iocb->data = cb_ctx; 257 258 if (io_submit(aio_ctx, 1, &iocb) < 0) { 259 printf("io_submit"); 260 return -1; 261 } 262 263 return 0; 264 } 265 266 static void 267 aio_check_io(void) 268 { 269 int count, i; 270 struct timespec timeout; 271 272 timeout.tv_sec = 0; 273 timeout.tv_nsec = 0; 274 275 count = io_getevents(g_ns->u.aio.ctx, 1, 1, g_ns->u.aio.events, &timeout); 276 if (count < 0) { 277 fprintf(stderr, "io_getevents error\n"); 278 exit(1); 279 } 280 281 for (i = 0; i < count; i++) { 282 g_ns->current_queue_depth--; 283 } 284 } 285 #endif /* HAVE_LIBAIO */ 286 287 static void io_complete(void *ctx, const struct spdk_nvme_cpl *completion); 288 289 static __thread unsigned int seed = 0; 290 291 static void 292 submit_single_io(void) 293 { 294 uint64_t offset_in_ios; 295 uint64_t start; 296 int rc; 297 struct ns_entry *entry = g_ns; 298 uint64_t tsc_submit; 299 300 offset_in_ios = rand_r(&seed) % entry->size_in_ios; 301 302 start = spdk_get_ticks(); 303 spdk_rmb(); 304 #if HAVE_LIBAIO 305 if (entry->type == ENTRY_TYPE_AIO_FILE) { 306 rc = aio_submit(g_ns->u.aio.ctx, &g_task->iocb, entry->u.aio.fd, IO_CMD_PREAD, g_task->buf, 307 g_io_size_bytes, offset_in_ios * g_io_size_bytes, g_task); 308 } else 309 #endif 310 { 311 rc = spdk_nvme_ns_cmd_read(entry->u.nvme.ns, g_ns->u.nvme.qpair, g_task->buf, 312 offset_in_ios * entry->io_size_blocks, 313 entry->io_size_blocks, io_complete, g_task, 0); 314 } 315 316 spdk_rmb(); 317 tsc_submit = spdk_get_ticks() - start; 318 g_tsc_submit += tsc_submit; 319 if (tsc_submit < g_tsc_submit_min) { 320 g_tsc_submit_min = tsc_submit; 321 } 322 if (tsc_submit > g_tsc_submit_max) { 323 g_tsc_submit_max = tsc_submit; 324 } 325 if (g_enable_histogram) { 326 spdk_histogram_data_tally(entry->submit_histogram, tsc_submit); 327 } 328 329 if (rc != 0) { 330 fprintf(stderr, "starting I/O failed\n"); 331 } else { 332 g_ns->current_queue_depth++; 333 } 334 } 335 336 static void 337 io_complete(void *ctx, const struct spdk_nvme_cpl *completion) 338 { 339 g_ns->current_queue_depth--; 340 } 341 342 uint64_t g_complete_tsc_start; 343 344 static uint64_t 345 check_io(void) 346 { 347 uint64_t end, tsc_complete; 348 349 spdk_rmb(); 350 #if HAVE_LIBAIO 351 if (g_ns->type == ENTRY_TYPE_AIO_FILE) { 352 aio_check_io(); 353 } else 354 #endif 355 { 356 spdk_nvme_qpair_process_completions(g_ns->u.nvme.qpair, 0); 357 } 358 spdk_rmb(); 359 end = spdk_get_ticks(); 360 if (g_ns->current_queue_depth == 1) { 361 /* 362 * Account for race condition in AIO case where interrupt occurs 363 * after checking for queue depth. If the timestamp capture 364 * is too big compared to the last capture, assume that an 365 * interrupt fired, and do not bump the start tsc forward. This 366 * will ensure this extra time is accounted for next time through 367 * when we see current_queue_depth drop to 0. 368 */ 369 if (g_ns->type == ENTRY_TYPE_NVME_NS || (end - g_complete_tsc_start) < 500) { 370 g_complete_tsc_start = end; 371 } 372 } else { 373 tsc_complete = end - g_complete_tsc_start; 374 g_tsc_complete += tsc_complete; 375 if (tsc_complete < g_tsc_complete_min) { 376 g_tsc_complete_min = tsc_complete; 377 } 378 if (tsc_complete > g_tsc_complete_max) { 379 g_tsc_complete_max = tsc_complete; 380 } 381 if (g_enable_histogram) { 382 spdk_histogram_data_tally(g_ns->complete_histogram, tsc_complete); 383 } 384 g_io_completed++; 385 if (!g_ns->is_draining) { 386 submit_single_io(); 387 } 388 end = g_complete_tsc_start = spdk_get_ticks(); 389 } 390 391 return end; 392 } 393 394 static void 395 drain_io(void) 396 { 397 g_ns->is_draining = true; 398 while (g_ns->current_queue_depth > 0) { 399 check_io(); 400 } 401 } 402 403 static int 404 init_ns_worker_ctx(void) 405 { 406 if (g_ns->type == ENTRY_TYPE_AIO_FILE) { 407 #ifdef HAVE_LIBAIO 408 g_ns->u.aio.events = calloc(1, sizeof(struct io_event)); 409 if (!g_ns->u.aio.events) { 410 return -1; 411 } 412 g_ns->u.aio.ctx = 0; 413 if (io_setup(1, &g_ns->u.aio.ctx) < 0) { 414 free(g_ns->u.aio.events); 415 perror("io_setup"); 416 return -1; 417 } 418 #endif 419 } else { 420 /* 421 * TODO: If a controller has multiple namespaces, they could all use the same queue. 422 * For now, give each namespace/thread combination its own queue. 423 */ 424 g_ns->u.nvme.qpair = spdk_nvme_ctrlr_alloc_io_qpair(g_ns->u.nvme.ctrlr, NULL, 0); 425 if (!g_ns->u.nvme.qpair) { 426 printf("ERROR: spdk_nvme_ctrlr_alloc_io_qpair failed\n"); 427 return -1; 428 } 429 } 430 431 return 0; 432 } 433 434 static void 435 cleanup_ns_worker_ctx(void) 436 { 437 if (g_ns->type == ENTRY_TYPE_AIO_FILE) { 438 #ifdef HAVE_LIBAIO 439 io_destroy(g_ns->u.aio.ctx); 440 free(g_ns->u.aio.events); 441 #endif 442 } else { 443 spdk_nvme_ctrlr_free_io_qpair(g_ns->u.nvme.qpair); 444 } 445 } 446 447 static int 448 work_fn(void) 449 { 450 uint64_t tsc_end, current; 451 452 /* Allocate a queue pair for each namespace. */ 453 if (init_ns_worker_ctx() != 0) { 454 printf("ERROR: init_ns_worker_ctx() failed\n"); 455 return 1; 456 } 457 458 tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate; 459 460 /* Submit initial I/O for each namespace. */ 461 submit_single_io(); 462 g_complete_tsc_start = spdk_get_ticks(); 463 464 while (1) { 465 /* 466 * Check for completed I/O for each controller. A new 467 * I/O will be submitted in the io_complete callback 468 * to replace each I/O that is completed. 469 */ 470 current = check_io(); 471 472 if (current > tsc_end) { 473 break; 474 } 475 } 476 477 drain_io(); 478 cleanup_ns_worker_ctx(); 479 480 return 0; 481 } 482 483 static void usage(char *program_name) 484 { 485 printf("%s options", program_name); 486 #if HAVE_LIBAIO 487 printf(" [AIO device(s)]..."); 488 #endif 489 printf("\t\n"); 490 printf("\t[-d DPDK huge memory size in MB]\n"); 491 printf("\t[-s io size in bytes]\n"); 492 printf("\t[-t time in seconds]\n"); 493 printf("\t\t(default: 1)]\n"); 494 printf("\t[-H enable histograms]\n"); 495 printf("\t[-g use single file descriptor for DPDK memory segments]\n"); 496 printf("\t[-i shared memory group ID]\n"); 497 printf("\t[-r remote NVMe over Fabrics target address]\n"); 498 #ifdef DEBUG 499 printf("\t[-L enable debug logging]\n"); 500 #else 501 printf("\t[-L enable debug logging (flag disabled, must reconfigure with --enable-debug)\n"); 502 #endif 503 spdk_log_usage(stdout, "\t\t-L"); 504 } 505 506 static void 507 print_bucket(void *ctx, uint64_t start, uint64_t end, uint64_t count, 508 uint64_t total, uint64_t so_far) 509 { 510 double so_far_pct; 511 512 if (count == 0) { 513 return; 514 } 515 516 so_far_pct = (double)so_far * 100 / total; 517 518 printf("%9.3f - %9.3f: %9.4f%% (%9ju)\n", 519 (double)start * 1000 * 1000 / g_tsc_rate, 520 (double)end * 1000 * 1000 / g_tsc_rate, 521 so_far_pct, count); 522 } 523 524 static void 525 print_stats(void) 526 { 527 double divisor = (double)g_tsc_rate / (1000 * 1000 * 1000); 528 529 printf("submit (in ns) avg, min, max = %8.1f, %8.1f, %8.1f\n", 530 (double)g_tsc_submit / g_io_completed / divisor, 531 (double)g_tsc_submit_min / divisor, 532 (double)g_tsc_submit_max / divisor); 533 printf("complete (in ns) avg, min, max = %8.1f, %8.1f, %8.1f\n", 534 (double)g_tsc_complete / g_io_completed / divisor, 535 (double)g_tsc_complete_min / divisor, 536 (double)g_tsc_complete_max / divisor); 537 538 if (!g_enable_histogram) { 539 return; 540 } 541 542 printf("\n"); 543 printf("Submit histogram\n"); 544 printf("================\n"); 545 printf(" Range in us Cumulative Count\n"); 546 spdk_histogram_data_iterate(g_ns->submit_histogram, print_bucket, NULL); 547 printf("\n"); 548 549 printf("Complete histogram\n"); 550 printf("==================\n"); 551 printf(" Range in us Cumulative Count\n"); 552 spdk_histogram_data_iterate(g_ns->complete_histogram, print_bucket, NULL); 553 printf("\n"); 554 555 } 556 557 static int 558 parse_args(int argc, char **argv, struct spdk_env_opts *env_opts) 559 { 560 int op, rc; 561 long int val; 562 563 /* default value */ 564 g_io_size_bytes = 0; 565 g_time_in_sec = 0; 566 567 spdk_nvme_trid_populate_transport(&g_trid, SPDK_NVME_TRANSPORT_PCIE); 568 snprintf(g_trid.subnqn, sizeof(g_trid.subnqn), "%s", SPDK_NVMF_DISCOVERY_NQN); 569 570 while ((op = getopt(argc, argv, "d:ghi:r:s:t:HL:")) != -1) { 571 switch (op) { 572 case 'h': 573 usage(argv[0]); 574 exit(0); 575 break; 576 case 's': 577 val = spdk_strtol(optarg, 10); 578 if (val < 0) { 579 fprintf(stderr, "Invalid io size\n"); 580 return val; 581 } 582 g_io_size_bytes = (uint32_t)val; 583 break; 584 case 't': 585 g_time_in_sec = spdk_strtol(optarg, 10); 586 if (g_time_in_sec < 0) { 587 fprintf(stderr, "Invalid run time\n"); 588 return g_time_in_sec; 589 } 590 break; 591 case 'H': 592 g_enable_histogram = true; 593 break; 594 case 'i': 595 env_opts->shm_id = spdk_strtol(optarg, 10); 596 if (env_opts->shm_id < 0) { 597 fprintf(stderr, "Invalid shared memory ID\n"); 598 return env_opts->shm_id; 599 } 600 break; 601 case 'g': 602 env_opts->hugepage_single_segments = true; 603 break; 604 case 'r': 605 if (spdk_nvme_transport_id_parse(&g_trid, optarg) != 0) { 606 fprintf(stderr, "Error parsing transport address\n"); 607 return 1; 608 } 609 break; 610 case 'd': 611 env_opts->mem_size = spdk_strtol(optarg, 10); 612 if (env_opts->mem_size < 0) { 613 fprintf(stderr, "Invalid DPDK memory size\n"); 614 return env_opts->mem_size; 615 } 616 break; 617 case 'L': 618 rc = spdk_log_set_flag(optarg); 619 if (rc < 0) { 620 fprintf(stderr, "unknown flag\n"); 621 usage(argv[0]); 622 exit(EXIT_FAILURE); 623 } 624 #ifdef DEBUG 625 spdk_log_set_print_level(SPDK_LOG_DEBUG); 626 #endif 627 break; 628 default: 629 usage(argv[0]); 630 return 1; 631 } 632 } 633 634 if (!g_io_size_bytes) { 635 usage(argv[0]); 636 return 1; 637 } 638 if (!g_time_in_sec) { 639 usage(argv[0]); 640 return 1; 641 } 642 643 g_aio_optind = optind; 644 645 return 0; 646 } 647 648 static bool 649 probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid, 650 struct spdk_nvme_ctrlr_opts *opts) 651 { 652 static uint32_t ctrlr_found = 0; 653 654 if (ctrlr_found == 1) { 655 fprintf(stderr, "only attaching to one controller, so skipping\n"); 656 fprintf(stderr, " controller at PCI address %s\n", 657 trid->traddr); 658 return false; 659 } 660 ctrlr_found = 1; 661 662 printf("Attaching to %s\n", trid->traddr); 663 664 return true; 665 } 666 667 static void 668 attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid, 669 struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts) 670 { 671 printf("Attached to %s\n", trid->traddr); 672 673 register_ctrlr(ctrlr); 674 } 675 676 static int 677 register_controllers(void) 678 { 679 printf("Initializing NVMe Controllers\n"); 680 681 if (spdk_nvme_probe(&g_trid, NULL, probe_cb, attach_cb, NULL) != 0) { 682 fprintf(stderr, "spdk_nvme_probe() failed\n"); 683 return 1; 684 } 685 686 if (g_ns == NULL) { 687 fprintf(stderr, "no NVMe controller found - check that device is bound to uio/vfio\n"); 688 return 1; 689 } 690 691 return 0; 692 } 693 694 static void 695 cleanup(void) 696 { 697 struct ns_entry *ns_entry = g_ns; 698 struct ctrlr_entry *ctrlr_entry, *tmp_ctrlr_entry; 699 struct spdk_nvme_detach_ctx *detach_ctx = NULL; 700 701 while (ns_entry) { 702 struct ns_entry *next = ns_entry->next; 703 704 spdk_histogram_data_free(ns_entry->submit_histogram); 705 spdk_histogram_data_free(ns_entry->complete_histogram); 706 free(ns_entry); 707 ns_entry = next; 708 } 709 710 TAILQ_FOREACH_SAFE(ctrlr_entry, &g_ctrlr, link, tmp_ctrlr_entry) { 711 TAILQ_REMOVE(&g_ctrlr, ctrlr_entry, link); 712 spdk_nvme_detach_async(ctrlr_entry->ctrlr, &detach_ctx); 713 free(ctrlr_entry); 714 } 715 716 if (detach_ctx) { 717 spdk_nvme_detach_poll(detach_ctx); 718 } 719 } 720 721 int main(int argc, char **argv) 722 { 723 int rc; 724 struct spdk_env_opts opts; 725 726 spdk_env_opts_init(&opts); 727 rc = parse_args(argc, argv, &opts); 728 if (rc != 0) { 729 return rc; 730 } 731 732 opts.name = "overhead"; 733 opts.core_mask = "0x1"; 734 if (spdk_env_init(&opts) < 0) { 735 fprintf(stderr, "Unable to initialize SPDK env\n"); 736 return 1; 737 } 738 739 g_task = spdk_zmalloc(sizeof(struct perf_task), 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 740 if (g_task == NULL) { 741 fprintf(stderr, "g_task alloc failed\n"); 742 exit(1); 743 } 744 745 g_task->buf = spdk_zmalloc(g_io_size_bytes, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 746 if (g_task->buf == NULL) { 747 fprintf(stderr, "g_task->buf spdk_zmalloc failed\n"); 748 exit(1); 749 } 750 751 g_tsc_rate = spdk_get_ticks_hz(); 752 753 #if HAVE_LIBAIO 754 if (g_aio_optind < argc) { 755 printf("Measuring overhead for AIO device %s.\n", argv[g_aio_optind]); 756 if (register_aio_file(argv[g_aio_optind]) != 0) { 757 cleanup(); 758 return -1; 759 } 760 } else 761 #endif 762 { 763 if (register_controllers() != 0) { 764 cleanup(); 765 return -1; 766 } 767 } 768 769 printf("Initialization complete. Launching workers.\n"); 770 771 rc = work_fn(); 772 773 print_stats(); 774 775 cleanup(); 776 777 if (rc != 0) { 778 fprintf(stderr, "%s: errors occurred\n", argv[0]); 779 } 780 781 return rc; 782 } 783