/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/barrier.h"
#include "spdk/fd.h"
#include "spdk/nvme.h"
#include "spdk/env.h"
#include "spdk/string.h"
#include "spdk/nvme_intel.h"
#include "spdk/histogram_data.h"
#include "spdk/log.h"

#if HAVE_LIBAIO
#include <libaio.h>
#endif

struct ctrlr_entry {
	struct spdk_nvme_ctrlr			*ctrlr;
	TAILQ_ENTRY(ctrlr_entry)		link;
	char					name[1024];
};

enum entry_type {
	ENTRY_TYPE_NVME_NS,
	ENTRY_TYPE_AIO_FILE,
};

struct ns_entry {
	enum entry_type		type;

	union {
		struct {
			struct spdk_nvme_ctrlr	*ctrlr;
			struct spdk_nvme_ns	*ns;
			struct spdk_nvme_qpair	*qpair;
		} nvme;
#if HAVE_LIBAIO
		struct {
			int			fd;
			struct io_event		*events;
			io_context_t		ctx;
		} aio;
#endif
	} u;

	uint32_t		io_size_blocks;
	uint64_t		size_in_ios;
	bool			is_draining;
	uint32_t		current_queue_depth;
	char			name[1024];
	struct ns_entry		*next;

	struct spdk_histogram_data	*submit_histogram;
	struct spdk_histogram_data	*complete_histogram;
};

struct perf_task {
	void			*buf;
	uint64_t		submit_tsc;
#if HAVE_LIBAIO
	struct iocb		iocb;
#endif
};

static bool g_enable_histogram = false;

static TAILQ_HEAD(, ctrlr_entry) g_ctrlr = TAILQ_HEAD_INITIALIZER(g_ctrlr);
static struct ns_entry *g_ns = NULL;

static uint64_t g_tsc_rate;

static uint32_t g_io_size_bytes;
static int g_time_in_sec;

static int g_aio_optind; /* Index of first AIO filename in argv */

struct perf_task *g_task;
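/*
 * Aggregate TSC counters for the single outstanding I/O: total, minimum and
 *  maximum ticks spent in the submission and completion paths, plus the
 *  number of I/Os completed, from which print_stats() derives the averages.
 */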
uint64_t g_tsc_submit = 0;
uint64_t g_tsc_submit_min = UINT64_MAX;
uint64_t g_tsc_submit_max = 0;
uint64_t g_tsc_complete = 0;
uint64_t g_tsc_complete_min = UINT64_MAX;
uint64_t g_tsc_complete_max = 0;
uint64_t g_io_completed = 0;

static struct spdk_nvme_transport_id g_trid = {};

static void
register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
{
	struct ns_entry *entry;
	const struct spdk_nvme_ctrlr_data *cdata;

	cdata = spdk_nvme_ctrlr_get_data(ctrlr);

	if (!spdk_nvme_ns_is_active(ns)) {
		printf("Controller %-20.20s (%-20.20s): Skipping inactive NS %u\n",
		       cdata->mn, cdata->sn,
		       spdk_nvme_ns_get_id(ns));
		return;
	}

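	/*
	 * The namespace must be large enough for at least one I/O, and the
	 *  sector size must not exceed the I/O size so that io_size_blocks
	 *  computed below is at least 1.
	 */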
	if (spdk_nvme_ns_get_size(ns) < g_io_size_bytes ||
	    spdk_nvme_ns_get_sector_size(ns) > g_io_size_bytes) {
		printf("WARNING: controller %-20.20s (%-20.20s) ns %u has invalid "
		       "ns size %" PRIu64 " / block size %u for I/O size %u\n",
		       cdata->mn, cdata->sn, spdk_nvme_ns_get_id(ns),
		       spdk_nvme_ns_get_size(ns), spdk_nvme_ns_get_sector_size(ns), g_io_size_bytes);
		return;
	}

	entry = calloc(1, sizeof(struct ns_entry));
	if (entry == NULL) {
		perror("ns_entry malloc");
		exit(1);
	}

	entry->type = ENTRY_TYPE_NVME_NS;
	entry->u.nvme.ctrlr = ctrlr;
	entry->u.nvme.ns = ns;

	entry->size_in_ios = spdk_nvme_ns_get_size(ns) /
			     g_io_size_bytes;
	entry->io_size_blocks = g_io_size_bytes / spdk_nvme_ns_get_sector_size(ns);
	entry->submit_histogram = spdk_histogram_data_alloc();
	entry->complete_histogram = spdk_histogram_data_alloc();

	snprintf(entry->name, sizeof(entry->name), "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);

	entry->next = g_ns;
	g_ns = entry;
}

static void
register_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	int num_ns;
	struct ctrlr_entry *entry = malloc(sizeof(struct ctrlr_entry));
	const struct spdk_nvme_ctrlr_data *cdata = spdk_nvme_ctrlr_get_data(ctrlr);

	if (entry == NULL) {
		perror("ctrlr_entry malloc");
		exit(1);
	}

	snprintf(entry->name, sizeof(entry->name), "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);

	entry->ctrlr = ctrlr;

	TAILQ_INSERT_TAIL(&g_ctrlr, entry, link);

	num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr);
	/* Only register the first namespace. */
	if (num_ns < 1) {
		fprintf(stderr, "controller found with no namespaces\n");
		return;
	}

	register_ns(ctrlr, spdk_nvme_ctrlr_get_ns(ctrlr, 1));
}

#if HAVE_LIBAIO
static int
register_aio_file(const char *path)
{
	struct ns_entry *entry;

	int fd;
	uint64_t size;
	uint32_t blklen;

	fd = open(path, O_RDWR | O_DIRECT);
	if (fd < 0) {
		fprintf(stderr, "Could not open AIO device %s: %s\n", path, strerror(errno));
		return -1;
	}

	size = spdk_fd_get_size(fd);
	if (size == 0) {
		fprintf(stderr, "Could not determine size of AIO device %s\n", path);
		close(fd);
		return -1;
	}

	blklen = spdk_fd_get_blocklen(fd);
	if (blklen == 0) {
		fprintf(stderr, "Could not determine block size of AIO device %s\n", path);
		close(fd);
		return -1;
	}

	entry = calloc(1, sizeof(struct ns_entry));
	if (entry == NULL) {
		close(fd);
		perror("aio ns_entry malloc");
		return -1;
	}

	entry->type = ENTRY_TYPE_AIO_FILE;
	entry->u.aio.fd = fd;
	entry->size_in_ios = size / g_io_size_bytes;
	entry->io_size_blocks = g_io_size_bytes / blklen;
	entry->submit_histogram = spdk_histogram_data_alloc();
	entry->complete_histogram = spdk_histogram_data_alloc();

	snprintf(entry->name, sizeof(entry->name), "%s", path);

	g_ns = entry;

	return 0;
}

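/*
 * Fill in the iocb by hand (equivalent to io_prep_pread()/io_prep_pwrite())
 *  and submit it as a batch of one.
 */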
static int
aio_submit(io_context_t aio_ctx, struct iocb *iocb, int fd, enum io_iocb_cmd cmd, void *buf,
	   unsigned long nbytes, uint64_t offset, void *cb_ctx)
{
	iocb->aio_fildes = fd;
	iocb->aio_reqprio = 0;
	iocb->aio_lio_opcode = cmd;
	iocb->u.c.buf = buf;
	iocb->u.c.nbytes = nbytes;
	iocb->u.c.offset = offset;
	iocb->data = cb_ctx;

	if (io_submit(aio_ctx, 1, &iocb) < 0) {
		fprintf(stderr, "io_submit failed\n");
		return -1;
	}

	return 0;
}

static void
aio_check_io(void)
{
	int count, i;
	struct timespec timeout;

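	/* A zeroed timeout makes io_getevents() poll without blocking. */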
	timeout.tv_sec = 0;
	timeout.tv_nsec = 0;

	count = io_getevents(g_ns->u.aio.ctx, 1, 1, g_ns->u.aio.events, &timeout);
	if (count < 0) {
		fprintf(stderr, "io_getevents error\n");
		exit(1);
	}

	for (i = 0; i < count; i++) {
		g_ns->current_queue_depth--;
	}
}
#endif /* HAVE_LIBAIO */

static void io_complete(void *ctx, const struct spdk_nvme_cpl *completion);

static __thread unsigned int seed = 0;

static void
submit_single_io(void)
{
	uint64_t		offset_in_ios;
	uint64_t		start;
	int			rc;
	struct ns_entry		*entry = g_ns;
	uint64_t		tsc_submit;

	offset_in_ios = rand_r(&seed) % entry->size_in_ios;

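	/*
	 * Time only the submission path: read the TSC immediately before and
	 *  after the submit call, with barriers so the reads are not reordered
	 *  around it.
	 */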
	start = spdk_get_ticks();
	spdk_rmb();
#if HAVE_LIBAIO
	if (entry->type == ENTRY_TYPE_AIO_FILE) {
		rc = aio_submit(g_ns->u.aio.ctx, &g_task->iocb, entry->u.aio.fd, IO_CMD_PREAD, g_task->buf,
				g_io_size_bytes, offset_in_ios * g_io_size_bytes, g_task);
	} else
#endif
	{
		rc = spdk_nvme_ns_cmd_read(entry->u.nvme.ns, g_ns->u.nvme.qpair, g_task->buf,
					   offset_in_ios * entry->io_size_blocks,
					   entry->io_size_blocks, io_complete, g_task, 0);
	}

	spdk_rmb();
	tsc_submit = spdk_get_ticks() - start;
	g_tsc_submit += tsc_submit;
	if (tsc_submit < g_tsc_submit_min) {
		g_tsc_submit_min = tsc_submit;
	}
	if (tsc_submit > g_tsc_submit_max) {
		g_tsc_submit_max = tsc_submit;
	}
	if (g_enable_histogram) {
		spdk_histogram_data_tally(entry->submit_histogram, tsc_submit);
	}

	if (rc != 0) {
		fprintf(stderr, "starting I/O failed\n");
	} else {
		g_ns->current_queue_depth++;
	}
}

static void
io_complete(void *ctx, const struct spdk_nvme_cpl *completion)
{
	g_ns->current_queue_depth--;
}

uint64_t g_complete_tsc_start;

static uint64_t
check_io(void)
{
	uint64_t end, tsc_complete;

	spdk_rmb();
#if HAVE_LIBAIO
	if (g_ns->type == ENTRY_TYPE_AIO_FILE) {
		aio_check_io();
	} else
#endif
	{
		spdk_nvme_qpair_process_completions(g_ns->u.nvme.qpair, 0);
	}
	spdk_rmb();
	end = spdk_get_ticks();
	if (g_ns->current_queue_depth == 1) {
		/*
		 * Account for race condition in AIO case where interrupt occurs
		 *  after checking for queue depth.  If the timestamp capture
		 *  is too big compared to the last capture, assume that an
		 *  interrupt fired, and do not bump the start tsc forward.  This
		 *  will ensure this extra time is accounted for next time through
		 *  when we see current_queue_depth drop to 0.
		 */
		if (g_ns->type == ENTRY_TYPE_NVME_NS || (end - g_complete_tsc_start) < 500) {
			g_complete_tsc_start = end;
		}
	} else {
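		/*
		 * The outstanding I/O completed: everything since the last
		 *  capture counts as completion-path overhead, and a new I/O
		 *  is submitted to replace it unless we are draining.
		 */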
		tsc_complete = end - g_complete_tsc_start;
		g_tsc_complete += tsc_complete;
		if (tsc_complete < g_tsc_complete_min) {
			g_tsc_complete_min = tsc_complete;
		}
		if (tsc_complete > g_tsc_complete_max) {
			g_tsc_complete_max = tsc_complete;
		}
		if (g_enable_histogram) {
			spdk_histogram_data_tally(g_ns->complete_histogram, tsc_complete);
		}
		g_io_completed++;
		if (!g_ns->is_draining) {
			submit_single_io();
		}
		end = g_complete_tsc_start = spdk_get_ticks();
	}

	return end;
}

static void
drain_io(void)
{
	g_ns->is_draining = true;
	while (g_ns->current_queue_depth > 0) {
		check_io();
	}
}

static int
init_ns_worker_ctx(void)
{
	if (g_ns->type == ENTRY_TYPE_AIO_FILE) {
#if HAVE_LIBAIO
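		/*
		 * Only one I/O is ever in flight, so a single-event AIO
		 *  context and one io_event slot are sufficient.
		 */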
		g_ns->u.aio.events = calloc(1, sizeof(struct io_event));
		if (!g_ns->u.aio.events) {
			return -1;
		}
		g_ns->u.aio.ctx = 0;
		if (io_setup(1, &g_ns->u.aio.ctx) < 0) {
			free(g_ns->u.aio.events);
			perror("io_setup");
			return -1;
		}
#endif
	} else {
		/*
		 * TODO: If a controller has multiple namespaces, they could all use the same queue.
		 *  For now, give each namespace/thread combination its own queue.
		 */
		g_ns->u.nvme.qpair = spdk_nvme_ctrlr_alloc_io_qpair(g_ns->u.nvme.ctrlr, NULL, 0);
		if (!g_ns->u.nvme.qpair) {
			printf("ERROR: spdk_nvme_ctrlr_alloc_io_qpair failed\n");
			return -1;
		}
	}

	return 0;
}

static void
cleanup_ns_worker_ctx(void)
{
	if (g_ns->type == ENTRY_TYPE_AIO_FILE) {
#if HAVE_LIBAIO
		io_destroy(g_ns->u.aio.ctx);
		free(g_ns->u.aio.events);
#endif
	} else {
		spdk_nvme_ctrlr_free_io_qpair(g_ns->u.nvme.qpair);
	}
}

static int
work_fn(void)
{
	uint64_t tsc_end, current;

	/* Allocate a queue pair for each namespace. */
	if (init_ns_worker_ctx() != 0) {
		printf("ERROR: init_ns_worker_ctx() failed\n");
		return 1;
	}

	tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;

	/* Submit initial I/O for each namespace. */
	submit_single_io();
	g_complete_tsc_start = spdk_get_ticks();

	while (1) {
		/*
		 * Check for completed I/O. check_io() submits a new I/O to
		 * replace each one that completes, so exactly one I/O stays
		 * in flight until the timer expires.
		 */
		current = check_io();

		if (current > tsc_end) {
			break;
		}
	}

	drain_io();
	cleanup_ns_worker_ctx();

	return 0;
}

static void
usage(char *program_name)
{
	printf("%s options", program_name);
#if HAVE_LIBAIO
	printf(" [AIO device(s)]...");
#endif
	printf("\n");
	printf("\t[-d DPDK huge memory size in MB]\n");
	printf("\t[-o io size in bytes]\n");
	printf("\t[-t time in seconds]\n");
	printf("\t[-H enable histograms]\n");
	printf("\t[-g use single file descriptor for DPDK memory segments]\n");
	printf("\t[-i shared memory group ID]\n");
	printf("\t[-r remote NVMe over Fabrics target address]\n");
#ifdef DEBUG
	printf("\t[-L enable debug logging]\n");
#else
	printf("\t[-L enable debug logging (flag disabled, must reconfigure with --enable-debug)]\n");
#endif
	spdk_log_usage(stdout, "\t\t-L");
}

static void
print_bucket(void *ctx, uint64_t start, uint64_t end, uint64_t count,
	     uint64_t total, uint64_t so_far)
{
	double so_far_pct;

	if (count == 0) {
		return;
	}

	so_far_pct = (double)so_far * 100 / total;

	printf("%9.3f - %9.3f: %9.4f%%  (%9ju)\n",
	       (double)start * 1000 * 1000 / g_tsc_rate,
	       (double)end * 1000 * 1000 / g_tsc_rate,
	       so_far_pct, count);
}

static void
print_stats(void)
{
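	/* TSC ticks per nanosecond, for converting tick counts to ns. */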
	double divisor = (double)g_tsc_rate / (1000 * 1000 * 1000);

	printf("submit (in ns)   avg, min, max = %8.1f, %8.1f, %8.1f\n",
	       (double)g_tsc_submit / g_io_completed / divisor,
	       (double)g_tsc_submit_min / divisor,
	       (double)g_tsc_submit_max / divisor);
	printf("complete (in ns) avg, min, max = %8.1f, %8.1f, %8.1f\n",
	       (double)g_tsc_complete / g_io_completed / divisor,
	       (double)g_tsc_complete_min / divisor,
	       (double)g_tsc_complete_max / divisor);

	if (!g_enable_histogram) {
		return;
	}

	printf("\n");
	printf("Submit histogram\n");
	printf("================\n");
	printf("       Range in us     Cumulative     Count\n");
	spdk_histogram_data_iterate(g_ns->submit_histogram, print_bucket, NULL);
	printf("\n");

	printf("Complete histogram\n");
	printf("==================\n");
	printf("       Range in us     Cumulative     Count\n");
	spdk_histogram_data_iterate(g_ns->complete_histogram, print_bucket, NULL);
	printf("\n");
}

static int
parse_args(int argc, char **argv, struct spdk_env_opts *env_opts)
{
	int op, rc;
	long int val;

	/* Initialize to zero so we can verify below that -o and -t were specified. */
	g_io_size_bytes = 0;
	g_time_in_sec = 0;

	spdk_nvme_trid_populate_transport(&g_trid, SPDK_NVME_TRANSPORT_PCIE);
	snprintf(g_trid.subnqn, sizeof(g_trid.subnqn), "%s", SPDK_NVMF_DISCOVERY_NQN);

	while ((op = getopt(argc, argv, "d:ghi:o:r:t:HL:")) != -1) {
		switch (op) {
		case 'h':
			usage(argv[0]);
			exit(0);
			break;
		case 'o':
			val = spdk_strtol(optarg, 10);
			if (val < 0) {
				fprintf(stderr, "Invalid io size\n");
				return val;
			}
			g_io_size_bytes = (uint32_t)val;
			break;
		case 't':
			g_time_in_sec = spdk_strtol(optarg, 10);
			if (g_time_in_sec < 0) {
				fprintf(stderr, "Invalid run time\n");
				return g_time_in_sec;
			}
			break;
		case 'H':
			g_enable_histogram = true;
			break;
		case 'i':
			env_opts->shm_id = spdk_strtol(optarg, 10);
			if (env_opts->shm_id < 0) {
				fprintf(stderr, "Invalid shared memory ID\n");
				return env_opts->shm_id;
			}
			break;
		case 'g':
			env_opts->hugepage_single_segments = true;
			break;
		case 'r':
			if (spdk_nvme_transport_id_parse(&g_trid, optarg) != 0) {
				fprintf(stderr, "Error parsing transport address\n");
				return 1;
			}
			break;
		case 'd':
			env_opts->mem_size = spdk_strtol(optarg, 10);
			if (env_opts->mem_size < 0) {
				fprintf(stderr, "Invalid DPDK memory size\n");
				return env_opts->mem_size;
			}
			break;
		case 'L':
			rc = spdk_log_set_flag(optarg);
			if (rc < 0) {
				fprintf(stderr, "unknown flag\n");
				usage(argv[0]);
				exit(EXIT_FAILURE);
			}
#ifdef DEBUG
			spdk_log_set_print_level(SPDK_LOG_DEBUG);
#endif
			break;
		default:
			usage(argv[0]);
			return 1;
		}
	}

	if (!g_io_size_bytes) {
		usage(argv[0]);
		return 1;
	}
	if (!g_time_in_sec) {
		usage(argv[0]);
		return 1;
	}

	g_aio_optind = optind;

	return 0;
}

static bool
probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	 struct spdk_nvme_ctrlr_opts *opts)
{
	static uint32_t ctrlr_found = 0;

	if (ctrlr_found == 1) {
		fprintf(stderr, "only attaching to one controller, so skipping\n");
		fprintf(stderr, " controller at PCI address %s\n",
			trid->traddr);
		return false;
	}
	ctrlr_found = 1;

	printf("Attaching to %s\n", trid->traddr);

	return true;
}

static void
attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	  struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
{
	printf("Attached to %s\n", trid->traddr);

	register_ctrlr(ctrlr);
}

static int
register_controllers(void)
{
	printf("Initializing NVMe Controllers\n");

	if (spdk_nvme_probe(&g_trid, NULL, probe_cb, attach_cb, NULL) != 0) {
		fprintf(stderr, "spdk_nvme_probe() failed\n");
		return 1;
	}

	if (g_ns == NULL) {
		fprintf(stderr, "no NVMe controller found - check that device is bound to uio/vfio\n");
		return 1;
	}

	return 0;
}

static void
cleanup(void)
{
	struct ns_entry *ns_entry = g_ns;
	struct ctrlr_entry *ctrlr_entry, *tmp_ctrlr_entry;
	struct spdk_nvme_detach_ctx *detach_ctx = NULL;

	while (ns_entry) {
		struct ns_entry *next = ns_entry->next;

		spdk_histogram_data_free(ns_entry->submit_histogram);
		spdk_histogram_data_free(ns_entry->complete_histogram);
		free(ns_entry);
		ns_entry = next;
	}

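	/*
	 * Detach every controller asynchronously, accumulating the detaches in
	 *  a single context that is polled to completion below.
	 */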
	TAILQ_FOREACH_SAFE(ctrlr_entry, &g_ctrlr, link, tmp_ctrlr_entry) {
		TAILQ_REMOVE(&g_ctrlr, ctrlr_entry, link);
		spdk_nvme_detach_async(ctrlr_entry->ctrlr, &detach_ctx);
		free(ctrlr_entry);
	}

	if (detach_ctx) {
		spdk_nvme_detach_poll(detach_ctx);
	}
}

int
main(int argc, char **argv)
{
	int			rc;
	struct spdk_env_opts	opts;

	opts.opts_size = sizeof(opts);
	spdk_env_opts_init(&opts);
	rc = parse_args(argc, argv, &opts);
	if (rc != 0) {
		return rc;
	}

	opts.name = "overhead";
	opts.core_mask = "0x1";
	if (spdk_env_init(&opts) < 0) {
		fprintf(stderr, "Unable to initialize SPDK env\n");
		return 1;
	}

	g_task = spdk_zmalloc(sizeof(struct perf_task), 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (g_task == NULL) {
		fprintf(stderr, "g_task alloc failed\n");
		exit(1);
	}

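	/*
	 * The data buffer must be DMA-safe for the NVMe driver; the 4 KiB
	 *  alignment also covers typical O_DIRECT requirements on the AIO path.
	 */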
	g_task->buf = spdk_zmalloc(g_io_size_bytes, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (g_task->buf == NULL) {
		fprintf(stderr, "g_task->buf spdk_zmalloc failed\n");
		exit(1);
	}

	g_tsc_rate = spdk_get_ticks_hz();

#if HAVE_LIBAIO
	if (g_aio_optind < argc) {
		printf("Measuring overhead for AIO device %s.\n", argv[g_aio_optind]);
		if (register_aio_file(argv[g_aio_optind]) != 0) {
			cleanup();
			return -1;
		}
	} else
#endif
	{
		if (register_controllers() != 0) {
			cleanup();
			return -1;
		}
	}

	printf("Initialization complete. Launching workers.\n");

	rc = work_fn();

	print_stats();

	cleanup();

	if (rc != 0) {
		fprintf(stderr, "%s: errors occurred\n", argv[0]);
	}

	return rc;
}