/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 */
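
/*
 * This tool measures per-I/O software overhead at a queue depth of one: the
 * TSC cost of submitting each random read and of reaping its completion,
 * against either an NVMe namespace (through the SPDK NVMe driver) or a
 * file/device accessed through Linux AIO.
 */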

#include "spdk/stdinc.h"

#include "spdk/barrier.h"
#include "spdk/fd.h"
#include "spdk/nvme.h"
#include "spdk/env.h"
#include "spdk/string.h"
#include "spdk/nvme_intel.h"
#include "spdk/histogram_data.h"
#include "spdk/log.h"

#if HAVE_LIBAIO
#include <libaio.h>
#endif

struct ctrlr_entry {
	struct spdk_nvme_ctrlr			*ctrlr;
	TAILQ_ENTRY(ctrlr_entry)		link;
	char					name[1024];
};

enum entry_type {
	ENTRY_TYPE_NVME_NS,
	ENTRY_TYPE_AIO_FILE,
};

struct ns_entry {
	enum entry_type		type;

	union {
		struct {
			struct spdk_nvme_ctrlr	*ctrlr;
			struct spdk_nvme_ns	*ns;
			struct spdk_nvme_qpair	*qpair;
		} nvme;
#if HAVE_LIBAIO
		struct {
			int			fd;
			struct io_event		*events;
			io_context_t		ctx;
		} aio;
#endif
	} u;

	uint32_t		io_size_blocks;
	uint64_t		size_in_ios;
	bool			is_draining;
	uint32_t		current_queue_depth;
	char			name[1024];
	struct ns_entry		*next;

	struct spdk_histogram_data	*submit_histogram;
	struct spdk_histogram_data	*complete_histogram;
};

struct perf_task {
	void			*buf;
	uint64_t		submit_tsc;
#if HAVE_LIBAIO
	struct iocb		iocb;
#endif
};

static bool g_enable_histogram = false;

static TAILQ_HEAD(, ctrlr_entry) g_ctrlr = TAILQ_HEAD_INITIALIZER(g_ctrlr);
static struct ns_entry *g_ns = NULL;

static uint64_t g_tsc_rate;

static uint32_t g_io_size_bytes;
static int g_time_in_sec;

static int g_aio_optind; /* Index of first AIO filename in argv */

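/*
 * Aggregate timing counters, in TSC ticks.  The workload runs at queue depth
 * one, so a single reusable task and buffer are sufficient.
 */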
struct perf_task *g_task;
uint64_t g_tsc_submit = 0;
uint64_t g_tsc_submit_min = UINT64_MAX;
uint64_t g_tsc_submit_max = 0;
uint64_t g_tsc_complete = 0;
uint64_t g_tsc_complete_min = UINT64_MAX;
uint64_t g_tsc_complete_max = 0;
uint64_t g_io_completed = 0;

static struct spdk_nvme_transport_id g_trid = {};

static void
register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
{
	struct ns_entry *entry;
	const struct spdk_nvme_ctrlr_data *cdata;

	cdata = spdk_nvme_ctrlr_get_data(ctrlr);

	if (!spdk_nvme_ns_is_active(ns)) {
		printf("Controller %-20.20s (%-20.20s): Skipping inactive NS %u\n",
		       cdata->mn, cdata->sn,
		       spdk_nvme_ns_get_id(ns));
		return;
	}

	if (spdk_nvme_ns_get_size(ns) < g_io_size_bytes ||
	    spdk_nvme_ns_get_sector_size(ns) > g_io_size_bytes) {
		printf("WARNING: controller %-20.20s (%-20.20s) ns %u has invalid "
		       "ns size %" PRIu64 " / block size %u for I/O size %u\n",
		       cdata->mn, cdata->sn, spdk_nvme_ns_get_id(ns),
		       spdk_nvme_ns_get_size(ns), spdk_nvme_ns_get_sector_size(ns), g_io_size_bytes);
		return;
	}

	entry = calloc(1, sizeof(struct ns_entry));
	if (entry == NULL) {
		perror("ns_entry malloc");
		exit(1);
	}

	entry->type = ENTRY_TYPE_NVME_NS;
	entry->u.nvme.ctrlr = ctrlr;
	entry->u.nvme.ns = ns;

	entry->size_in_ios = spdk_nvme_ns_get_size(ns) /
			     g_io_size_bytes;
	entry->io_size_blocks = g_io_size_bytes / spdk_nvme_ns_get_sector_size(ns);
	entry->submit_histogram = spdk_histogram_data_alloc();
	entry->complete_histogram = spdk_histogram_data_alloc();

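	/* "%-20.20s (%-20.20s)" expands to exactly 43 characters plus the NUL terminator. */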
	snprintf(entry->name, 44, "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);

	entry->next = g_ns;
	g_ns = entry;
}

static void
register_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	int num_ns;
	struct ctrlr_entry *entry = malloc(sizeof(struct ctrlr_entry));
	const struct spdk_nvme_ctrlr_data *cdata = spdk_nvme_ctrlr_get_data(ctrlr);

	if (entry == NULL) {
		perror("ctrlr_entry malloc");
		exit(1);
	}

	snprintf(entry->name, sizeof(entry->name), "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);

	entry->ctrlr = ctrlr;

	TAILQ_INSERT_TAIL(&g_ctrlr, entry, link);

	num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr);
	/* Only register the first namespace. */
	if (num_ns < 1) {
		fprintf(stderr, "controller found with no namespaces\n");
		return;
	}

	register_ns(ctrlr, spdk_nvme_ctrlr_get_ns(ctrlr, 1));
}

#if HAVE_LIBAIO
static int
register_aio_file(const char *path)
{
	struct ns_entry *entry;

	int fd;
	uint64_t size;
	uint32_t blklen;

	fd = open(path, O_RDWR | O_DIRECT);
	if (fd < 0) {
		fprintf(stderr, "Could not open AIO device %s: %s\n", path, strerror(errno));
		return -1;
	}

	size = spdk_fd_get_size(fd);
	if (size == 0) {
		fprintf(stderr, "Could not determine size of AIO device %s\n", path);
		close(fd);
		return -1;
	}

	blklen = spdk_fd_get_blocklen(fd);
	if (blklen == 0) {
		fprintf(stderr, "Could not determine block size of AIO device %s\n", path);
		close(fd);
		return -1;
	}

	entry = calloc(1, sizeof(struct ns_entry));
	if (entry == NULL) {
		close(fd);
		perror("aio ns_entry malloc");
		return -1;
	}

	entry->type = ENTRY_TYPE_AIO_FILE;
	entry->u.aio.fd = fd;
	entry->size_in_ios = size / g_io_size_bytes;
	entry->io_size_blocks = g_io_size_bytes / blklen;
	entry->submit_histogram = spdk_histogram_data_alloc();
	entry->complete_histogram = spdk_histogram_data_alloc();

	snprintf(entry->name, sizeof(entry->name), "%s", path);

	entry->next = g_ns;
	g_ns = entry;

	return 0;
}

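/*
 * Build a libaio iocb by hand (the equivalent of io_prep_pread()/io_prep_pwrite())
 * and submit it on the given context.
 */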
static int
aio_submit(io_context_t aio_ctx, struct iocb *iocb, int fd, enum io_iocb_cmd cmd, void *buf,
	   unsigned long nbytes, uint64_t offset, void *cb_ctx)
{
	iocb->aio_fildes = fd;
	iocb->aio_reqprio = 0;
	iocb->aio_lio_opcode = cmd;
	iocb->u.c.buf = buf;
	iocb->u.c.nbytes = nbytes;
	iocb->u.c.offset = offset;
	iocb->data = cb_ctx;

	if (io_submit(aio_ctx, 1, &iocb) < 0) {
		fprintf(stderr, "io_submit failed\n");
		return -1;
	}

	return 0;
}

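/* Non-blocking poll (zero timeout) for at most one AIO completion. */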
static void
aio_check_io(void)
{
	int count, i;
	struct timespec timeout;

	timeout.tv_sec = 0;
	timeout.tv_nsec = 0;

	count = io_getevents(g_ns->u.aio.ctx, 1, 1, g_ns->u.aio.events, &timeout);
	if (count < 0) {
		fprintf(stderr, "io_getevents error\n");
		exit(1);
	}

	for (i = 0; i < count; i++) {
		g_ns->current_queue_depth--;
	}
}
#endif /* HAVE_LIBAIO */

static void io_complete(void *ctx, const struct spdk_nvme_cpl *completion);

static __thread unsigned int seed = 0;

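/*
 * Submit one random read and attribute the TSC ticks spent in the submission
 * path.  The spdk_rmb() barriers keep the timestamp reads from being
 * reordered around the submit call, so the measured window brackets only the
 * submission itself.
 */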
static void
submit_single_io(void)
{
	uint64_t		offset_in_ios;
	uint64_t		start;
	int			rc;
	struct ns_entry		*entry = g_ns;
	uint64_t		tsc_submit;

	offset_in_ios = rand_r(&seed) % entry->size_in_ios;

	start = spdk_get_ticks();
	spdk_rmb();
#if HAVE_LIBAIO
	if (entry->type == ENTRY_TYPE_AIO_FILE) {
		rc = aio_submit(entry->u.aio.ctx, &g_task->iocb, entry->u.aio.fd, IO_CMD_PREAD, g_task->buf,
				g_io_size_bytes, offset_in_ios * g_io_size_bytes, g_task);
	} else
#endif
	{
		rc = spdk_nvme_ns_cmd_read(entry->u.nvme.ns, entry->u.nvme.qpair, g_task->buf,
					   offset_in_ios * entry->io_size_blocks,
					   entry->io_size_blocks, io_complete, g_task, 0);
	}

	spdk_rmb();
	tsc_submit = spdk_get_ticks() - start;
	g_tsc_submit += tsc_submit;
	if (tsc_submit < g_tsc_submit_min) {
		g_tsc_submit_min = tsc_submit;
	}
	if (tsc_submit > g_tsc_submit_max) {
		g_tsc_submit_max = tsc_submit;
	}
	if (g_enable_histogram) {
		spdk_histogram_data_tally(entry->submit_histogram, tsc_submit);
	}

	if (rc != 0) {
		fprintf(stderr, "starting I/O failed\n");
	} else {
		entry->current_queue_depth++;
	}
}

static void
io_complete(void *ctx, const struct spdk_nvme_cpl *completion)
{
	g_ns->current_queue_depth--;
}

uint64_t g_complete_tsc_start;

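/*
 * Poll for a completion.  Ticks from g_complete_tsc_start until the single
 * outstanding I/O is observed complete are charged to the completion path;
 * while the I/O is still in flight (queue depth stays at one) the start
 * timestamp is simply advanced.  Returns the current tick count so the
 * caller can test for end-of-run.
 */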
static uint64_t
check_io(void)
{
	uint64_t end, tsc_complete;

	spdk_rmb();
#if HAVE_LIBAIO
	if (g_ns->type == ENTRY_TYPE_AIO_FILE) {
		aio_check_io();
	} else
#endif
	{
		spdk_nvme_qpair_process_completions(g_ns->u.nvme.qpair, 0);
	}
	spdk_rmb();
	end = spdk_get_ticks();
	if (g_ns->current_queue_depth == 1) {
		/*
		 * Account for race condition in AIO case where interrupt occurs
		 *  after checking for queue depth.  If the timestamp capture
		 *  is too big compared to the last capture, assume that an
		 *  interrupt fired, and do not bump the start tsc forward.  This
		 *  will ensure this extra time is accounted for next time through
		 *  when we see current_queue_depth drop to 0.
		 */
		if (g_ns->type == ENTRY_TYPE_NVME_NS || (end - g_complete_tsc_start) < 500) {
			g_complete_tsc_start = end;
		}
	} else {
		tsc_complete = end - g_complete_tsc_start;
		g_tsc_complete += tsc_complete;
		if (tsc_complete < g_tsc_complete_min) {
			g_tsc_complete_min = tsc_complete;
		}
		if (tsc_complete > g_tsc_complete_max) {
			g_tsc_complete_max = tsc_complete;
		}
		if (g_enable_histogram) {
			spdk_histogram_data_tally(g_ns->complete_histogram, tsc_complete);
		}
		g_io_completed++;
		if (!g_ns->is_draining) {
			submit_single_io();
		}
		end = g_complete_tsc_start = spdk_get_ticks();
	}

	return end;
}

static void
drain_io(void)
{
	g_ns->is_draining = true;
	while (g_ns->current_queue_depth > 0) {
		check_io();
	}
}

static int
init_ns_worker_ctx(void)
{
	if (g_ns->type == ENTRY_TYPE_AIO_FILE) {
#if HAVE_LIBAIO
		g_ns->u.aio.events = calloc(1, sizeof(struct io_event));
		if (!g_ns->u.aio.events) {
			return -1;
		}
		g_ns->u.aio.ctx = 0;
		if (io_setup(1, &g_ns->u.aio.ctx) < 0) {
			free(g_ns->u.aio.events);
			perror("io_setup");
			return -1;
		}
#endif
	} else {
		/*
		 * TODO: If a controller has multiple namespaces, they could all use the same queue.
		 *  For now, give each namespace/thread combination its own queue.
		 */
		g_ns->u.nvme.qpair = spdk_nvme_ctrlr_alloc_io_qpair(g_ns->u.nvme.ctrlr, NULL, 0);
		if (!g_ns->u.nvme.qpair) {
			printf("ERROR: spdk_nvme_ctrlr_alloc_io_qpair failed\n");
			return -1;
		}
	}

	return 0;
}

static void
cleanup_ns_worker_ctx(void)
{
	if (g_ns->type == ENTRY_TYPE_AIO_FILE) {
#if HAVE_LIBAIO
		io_destroy(g_ns->u.aio.ctx);
		free(g_ns->u.aio.events);
#endif
	} else {
		spdk_nvme_ctrlr_free_io_qpair(g_ns->u.nvme.qpair);
	}
}

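/*
 * Main measurement loop: set up the queue pair (or AIO context), keep exactly
 * one I/O in flight until the run time elapses, then drain and clean up.
 */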
static int
work_fn(void)
{
	uint64_t tsc_end, current;

	/* Allocate a queue pair for each namespace. */
	if (init_ns_worker_ctx() != 0) {
		printf("ERROR: init_ns_worker_ctx() failed\n");
		return 1;
	}

	tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;

	/* Submit initial I/O for each namespace. */
	submit_single_io();
	g_complete_tsc_start = spdk_get_ticks();

	while (1) {
		/*
		 * Check for completed I/O for each controller. A new
		 * I/O will be submitted in the io_complete callback
		 * to replace each I/O that is completed.
		 */
		current = check_io();

		if (current > tsc_end) {
			break;
		}
	}

	drain_io();
	cleanup_ns_worker_ctx();

	return 0;
}

static void
usage(char *program_name)
{
	printf("%s options", program_name);
#if HAVE_LIBAIO
	printf(" [AIO device(s)]...");
#endif
	printf("\n");
	printf("\t[-d DPDK huge memory size in MB]\n");
	printf("\t[-o io size in bytes]\n");
	printf("\t[-t time in seconds]\n");
	printf("\t[-H enable histograms]\n");
	printf("\t[-g use single file descriptor for DPDK memory segments]\n");
	printf("\t[-i shared memory group ID]\n");
	printf("\t[-r remote NVMe over Fabrics target address]\n");
#ifdef DEBUG
	printf("\t[-L enable debug logging]\n");
#else
	printf("\t[-L enable debug logging (flag disabled, must reconfigure with --enable-debug)]\n");
#endif
	spdk_log_usage(stdout, "\t\t-L");
}
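
/*
 * Illustrative invocation (device paths and sizes are examples only):
 *
 *   ./overhead -o 4096 -t 10 -H
 *
 * measures 4 KiB reads for 10 seconds against the first NVMe namespace found
 * and prints submit/complete histograms.
 */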
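/*
 * spdk_histogram_data_iterate() callback: convert bucket bounds from ticks
 * to microseconds and print the cumulative percentage alongside the count.
 */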
static void
print_bucket(void *ctx, uint64_t start, uint64_t end, uint64_t count,
	     uint64_t total, uint64_t so_far)
{
	double so_far_pct;

	if (count == 0) {
		return;
	}

	so_far_pct = (double)so_far * 100 / total;

	printf("%9.3f - %9.3f: %9.4f%%  (%9ju)\n",
	       (double)start * 1000 * 1000 / g_tsc_rate,
	       (double)end * 1000 * 1000 / g_tsc_rate,
	       so_far_pct, count);
}

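/*
 * Report averages and extremes in nanoseconds: divisor is the number of TSC
 * ticks per nanosecond.
 */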
static void
print_stats(void)
{
	double divisor = (double)g_tsc_rate / (1000 * 1000 * 1000);

	printf("submit (in ns)   avg, min, max = %8.1f, %8.1f, %8.1f\n",
	       (double)g_tsc_submit / g_io_completed / divisor,
	       (double)g_tsc_submit_min / divisor,
	       (double)g_tsc_submit_max / divisor);
	printf("complete (in ns) avg, min, max = %8.1f, %8.1f, %8.1f\n",
	       (double)g_tsc_complete / g_io_completed / divisor,
	       (double)g_tsc_complete_min / divisor,
	       (double)g_tsc_complete_max / divisor);

	if (!g_enable_histogram) {
		return;
	}

	printf("\n");
	printf("Submit histogram\n");
	printf("================\n");
	printf("       Range in us     Cumulative     Count\n");
	spdk_histogram_data_iterate(g_ns->submit_histogram, print_bucket, NULL);
	printf("\n");

	printf("Complete histogram\n");
	printf("==================\n");
	printf("       Range in us     Cumulative     Count\n");
	spdk_histogram_data_iterate(g_ns->complete_histogram, print_bucket, NULL);
	printf("\n");
}

static int
parse_args(int argc, char **argv, struct spdk_env_opts *env_opts)
{
	int op, rc;
	long int val;

	/* default value */
	g_io_size_bytes = 0;
	g_time_in_sec = 0;

	spdk_nvme_trid_populate_transport(&g_trid, SPDK_NVME_TRANSPORT_PCIE);
	snprintf(g_trid.subnqn, sizeof(g_trid.subnqn), "%s", SPDK_NVMF_DISCOVERY_NQN);

	while ((op = getopt(argc, argv, "d:ghi:o:r:t:HL:")) != -1) {
		switch (op) {
		case 'h':
			usage(argv[0]);
			exit(0);
			break;
		case 'o':
			val = spdk_strtol(optarg, 10);
			if (val < 0) {
				fprintf(stderr, "Invalid io size\n");
				return val;
			}
			g_io_size_bytes = (uint32_t)val;
			break;
		case 't':
			g_time_in_sec = spdk_strtol(optarg, 10);
			if (g_time_in_sec < 0) {
				fprintf(stderr, "Invalid run time\n");
				return g_time_in_sec;
			}
			break;
		case 'H':
			g_enable_histogram = true;
			break;
		case 'i':
			env_opts->shm_id = spdk_strtol(optarg, 10);
			if (env_opts->shm_id < 0) {
				fprintf(stderr, "Invalid shared memory ID\n");
				return env_opts->shm_id;
			}
			break;
		case 'g':
			env_opts->hugepage_single_segments = true;
			break;
		case 'r':
			if (spdk_nvme_transport_id_parse(&g_trid, optarg) != 0) {
				fprintf(stderr, "Error parsing transport address\n");
				return 1;
			}
			break;
		case 'd':
			env_opts->mem_size = spdk_strtol(optarg, 10);
			if (env_opts->mem_size < 0) {
				fprintf(stderr, "Invalid DPDK memory size\n");
				return env_opts->mem_size;
			}
			break;
		case 'L':
			rc = spdk_log_set_flag(optarg);
			if (rc < 0) {
				fprintf(stderr, "unknown flag\n");
				usage(argv[0]);
				exit(EXIT_FAILURE);
			}
#ifdef DEBUG
			spdk_log_set_print_level(SPDK_LOG_DEBUG);
#endif
			break;
		default:
			usage(argv[0]);
			return 1;
		}
	}

	if (!g_io_size_bytes) {
		usage(argv[0]);
		return 1;
	}
	if (!g_time_in_sec) {
		usage(argv[0]);
		return 1;
	}

	g_aio_optind = optind;

	return 0;
}

static bool
probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	 struct spdk_nvme_ctrlr_opts *opts)
{
	static uint32_t ctrlr_found = 0;

	if (ctrlr_found == 1) {
		fprintf(stderr, "only attaching to one controller, so skipping\n");
		fprintf(stderr, " controller at PCI address %s\n",
			trid->traddr);
		return false;
	}
	ctrlr_found = 1;

	printf("Attaching to %s\n", trid->traddr);

	return true;
}

static void
attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	  struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
{
	printf("Attached to %s\n", trid->traddr);

	register_ctrlr(ctrlr);
}

static int
register_controllers(void)
{
	printf("Initializing NVMe Controllers\n");

	if (spdk_nvme_probe(&g_trid, NULL, probe_cb, attach_cb, NULL) != 0) {
		fprintf(stderr, "spdk_nvme_probe() failed\n");
		return 1;
	}

	if (g_ns == NULL) {
		fprintf(stderr, "no NVMe controller found - check that device is bound to uio/vfio\n");
		return 1;
	}

	return 0;
}

static void
cleanup(void)
{
	struct ns_entry *ns_entry = g_ns;
	struct ctrlr_entry *ctrlr_entry, *tmp_ctrlr_entry;
	struct spdk_nvme_detach_ctx *detach_ctx = NULL;

	while (ns_entry) {
		struct ns_entry *next = ns_entry->next;

		spdk_histogram_data_free(ns_entry->submit_histogram);
		spdk_histogram_data_free(ns_entry->complete_histogram);
		free(ns_entry);
		ns_entry = next;
	}

	TAILQ_FOREACH_SAFE(ctrlr_entry, &g_ctrlr, link, tmp_ctrlr_entry) {
		TAILQ_REMOVE(&g_ctrlr, ctrlr_entry, link);
		spdk_nvme_detach_async(ctrlr_entry->ctrlr, &detach_ctx);
		free(ctrlr_entry);
	}

	if (detach_ctx) {
		spdk_nvme_detach_poll(detach_ctx);
	}
}

int
main(int argc, char **argv)
{
	int			rc;
	struct spdk_env_opts	opts;

	spdk_env_opts_init(&opts);
	rc = parse_args(argc, argv, &opts);
	if (rc != 0) {
		return rc;
	}

	opts.name = "overhead";
	opts.core_mask = "0x1";
	if (spdk_env_init(&opts) < 0) {
		fprintf(stderr, "Unable to initialize SPDK env\n");
		return 1;
	}

	g_task = spdk_zmalloc(sizeof(struct perf_task), 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (g_task == NULL) {
		fprintf(stderr, "g_task alloc failed\n");
		exit(1);
	}

	g_task->buf = spdk_zmalloc(g_io_size_bytes, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (g_task->buf == NULL) {
		fprintf(stderr, "g_task->buf spdk_zmalloc failed\n");
		exit(1);
	}

	g_tsc_rate = spdk_get_ticks_hz();

#if HAVE_LIBAIO
	if (g_aio_optind < argc) {
		printf("Measuring overhead for AIO device %s.\n", argv[g_aio_optind]);
		if (register_aio_file(argv[g_aio_optind]) != 0) {
			cleanup();
			return -1;
		}
	} else
#endif
	{
		if (register_controllers() != 0) {
			cleanup();
			return -1;
		}
	}

	printf("Initialization complete. Launching workers.\n");

	rc = work_fn();

	print_stats();

	cleanup();

	if (rc != 0) {
		fprintf(stderr, "%s: errors occurred\n", argv[0]);
	}

	return rc;
756 }
757