xref: /spdk/examples/nvme/abort/abort.c (revision 57fd99b91e71a4baa5543e19ff83958dc99d4dac)
1488570ebSJim Harris /*   SPDX-License-Identifier: BSD-3-Clause
2a6dbe372Spaul luse  *   Copyright (C) 2020 Intel Corporation.
3ca1b318fSShuhei Matsumoto  *   All rights reserved.
4ca1b318fSShuhei Matsumoto  */
5ca1b318fSShuhei Matsumoto 
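/*
 * Example application: drive read/write I/O at a fixed queue depth and, every
 * g_abort_interval submitted I/Os, issue an NVMe Abort admin command against
 * the I/O that was just submitted.  At the end of the run, report per
 * controller how many aborts were submitted and how many were successful,
 * unsuccessful, or failed.
 */
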
6ca1b318fSShuhei Matsumoto #include "spdk/stdinc.h"
7ca1b318fSShuhei Matsumoto 
8ca1b318fSShuhei Matsumoto #include "spdk/env.h"
9ca1b318fSShuhei Matsumoto #include "spdk/log.h"
10ca1b318fSShuhei Matsumoto #include "spdk/nvme.h"
11ca1b318fSShuhei Matsumoto #include "spdk/queue.h"
12ca1b318fSShuhei Matsumoto #include "spdk/string.h"
13ca1b318fSShuhei Matsumoto #include "spdk/util.h"
14ca1b318fSShuhei Matsumoto #include "spdk/likely.h"
15ca1b318fSShuhei Matsumoto 
161826245aSSarvesh Lanke #define ABORT_GETOPT_STRING "a:c:i:l:o:q:r:s:t:w:GM:T:"
17ca1b318fSShuhei Matsumoto struct ctrlr_entry {
18ca1b318fSShuhei Matsumoto 	struct spdk_nvme_ctrlr		*ctrlr;
19ca1b318fSShuhei Matsumoto 	enum spdk_nvme_transport_type	trtype;
20ca1b318fSShuhei Matsumoto 
214c3fd228SShuhei Matsumoto 	TAILQ_ENTRY(ctrlr_entry)	link;
22ca1b318fSShuhei Matsumoto 	char				name[1024];
23ca1b318fSShuhei Matsumoto };
24ca1b318fSShuhei Matsumoto 
25ca1b318fSShuhei Matsumoto struct ns_entry {
26ca1b318fSShuhei Matsumoto 	struct spdk_nvme_ctrlr		*ctrlr;
27ca1b318fSShuhei Matsumoto 	struct spdk_nvme_ns		*ns;
28ca1b318fSShuhei Matsumoto 
294c3fd228SShuhei Matsumoto 	TAILQ_ENTRY(ns_entry)		link;
30ca1b318fSShuhei Matsumoto 	uint32_t			io_size_blocks;
31ca1b318fSShuhei Matsumoto 	uint32_t			num_io_requests;
32ca1b318fSShuhei Matsumoto 	uint64_t			size_in_ios;
33ca1b318fSShuhei Matsumoto 	uint32_t			block_size;
34ca1b318fSShuhei Matsumoto 	char				name[1024];
35ca1b318fSShuhei Matsumoto };
36ca1b318fSShuhei Matsumoto 
37ca1b318fSShuhei Matsumoto struct ctrlr_worker_ctx {
38ca1b318fSShuhei Matsumoto 	pthread_mutex_t			mutex;
39ca1b318fSShuhei Matsumoto 	struct ctrlr_entry		*entry;
40ca1b318fSShuhei Matsumoto 	uint64_t			abort_submitted;
41ca1b318fSShuhei Matsumoto 	uint64_t			abort_submit_failed;
42ca1b318fSShuhei Matsumoto 	uint64_t			successful_abort;
43ca1b318fSShuhei Matsumoto 	uint64_t			unsuccessful_abort;
44ca1b318fSShuhei Matsumoto 	uint64_t			abort_failed;
45ca1b318fSShuhei Matsumoto 	uint64_t			current_queue_depth;
46ca1b318fSShuhei Matsumoto 	struct spdk_nvme_ctrlr		*ctrlr;
474c3fd228SShuhei Matsumoto 	TAILQ_ENTRY(ctrlr_worker_ctx)	link;
48ca1b318fSShuhei Matsumoto };
49ca1b318fSShuhei Matsumoto 
50ca1b318fSShuhei Matsumoto struct ns_worker_ctx {
51ca1b318fSShuhei Matsumoto 	struct ns_entry			*entry;
52ca1b318fSShuhei Matsumoto 	uint64_t			io_submitted;
53ca1b318fSShuhei Matsumoto 	uint64_t			io_completed;
54ca1b318fSShuhei Matsumoto 	uint64_t			io_aborted;
55ca1b318fSShuhei Matsumoto 	uint64_t			io_failed;
56ca1b318fSShuhei Matsumoto 	uint64_t			current_queue_depth;
57ca1b318fSShuhei Matsumoto 	uint64_t			offset_in_ios;
58ca1b318fSShuhei Matsumoto 	bool				is_draining;
59ca1b318fSShuhei Matsumoto 	struct spdk_nvme_qpair		*qpair;
60ca1b318fSShuhei Matsumoto 	struct ctrlr_worker_ctx		*ctrlr_ctx;
614c3fd228SShuhei Matsumoto 	TAILQ_ENTRY(ns_worker_ctx)	link;
62ca1b318fSShuhei Matsumoto };
63ca1b318fSShuhei Matsumoto 
64ca1b318fSShuhei Matsumoto struct perf_task {
65ca1b318fSShuhei Matsumoto 	struct ns_worker_ctx		*ns_ctx;
66ca1b318fSShuhei Matsumoto 	void				*buf;
67ca1b318fSShuhei Matsumoto };
68ca1b318fSShuhei Matsumoto 
69ca1b318fSShuhei Matsumoto struct worker_thread {
704c3fd228SShuhei Matsumoto 	TAILQ_HEAD(, ns_worker_ctx)	ns_ctx;
714c3fd228SShuhei Matsumoto 	TAILQ_HEAD(, ctrlr_worker_ctx)	ctrlr_ctx;
724c3fd228SShuhei Matsumoto 	TAILQ_ENTRY(worker_thread)	link;
73ca1b318fSShuhei Matsumoto 	unsigned			lcore;
749c04e6d8SKonrad Sztyber 	int				status;
75ca1b318fSShuhei Matsumoto };
76ca1b318fSShuhei Matsumoto 
77ca1b318fSShuhei Matsumoto static const char *g_workload_type = "read";
784c3fd228SShuhei Matsumoto static TAILQ_HEAD(, ctrlr_entry) g_controllers = TAILQ_HEAD_INITIALIZER(g_controllers);
794c3fd228SShuhei Matsumoto static TAILQ_HEAD(, ns_entry) g_namespaces = TAILQ_HEAD_INITIALIZER(g_namespaces);
80ca1b318fSShuhei Matsumoto static int g_num_namespaces;
814c3fd228SShuhei Matsumoto static TAILQ_HEAD(, worker_thread) g_workers = TAILQ_HEAD_INITIALIZER(g_workers);
824c3fd228SShuhei Matsumoto static int g_num_workers = 0;
8387b21afdSJim Harris static uint32_t g_main_core;
84ca1b318fSShuhei Matsumoto 
85ca1b318fSShuhei Matsumoto static int g_abort_interval = 1;
86ca1b318fSShuhei Matsumoto 
87ca1b318fSShuhei Matsumoto static uint64_t g_tsc_rate;
88ca1b318fSShuhei Matsumoto 
89ca1b318fSShuhei Matsumoto static uint32_t g_io_size_bytes = 131072;
90ca1b318fSShuhei Matsumoto static uint32_t g_max_io_size_blocks;
91ca1b318fSShuhei Matsumoto static int g_rw_percentage = -1;
92ca1b318fSShuhei Matsumoto static int g_is_random;
93ca1b318fSShuhei Matsumoto static int g_queue_depth = 128;
94ca1b318fSShuhei Matsumoto static int g_time_in_sec = 3;
95ca1b318fSShuhei Matsumoto static int g_dpdk_mem;
96ca1b318fSShuhei Matsumoto static int g_shm_id = -1;
97ca1b318fSShuhei Matsumoto static bool g_no_pci;
98ca1b318fSShuhei Matsumoto static bool g_warn;
99ca1b318fSShuhei Matsumoto static bool g_mix_specified;
1001826245aSSarvesh Lanke static bool g_no_hugepages;
101ca1b318fSShuhei Matsumoto 
102ca1b318fSShuhei Matsumoto static const char *g_core_mask;
103ca1b318fSShuhei Matsumoto 
1041826245aSSarvesh Lanke static const struct option g_abort_cmdline_opts[] = {
1051826245aSSarvesh Lanke #define ABORT_NO_HUGE        257
1061826245aSSarvesh Lanke 	{"no-huge",			no_argument,	NULL, ABORT_NO_HUGE},
1071826245aSSarvesh Lanke 	{0, 0, 0, 0}
1081826245aSSarvesh Lanke };
1091826245aSSarvesh Lanke 
110ca1b318fSShuhei Matsumoto struct trid_entry {
111ca1b318fSShuhei Matsumoto 	struct spdk_nvme_transport_id	trid;
112ca1b318fSShuhei Matsumoto 	uint16_t			nsid;
113ca1b318fSShuhei Matsumoto 	TAILQ_ENTRY(trid_entry)		tailq;
114ca1b318fSShuhei Matsumoto };
115ca1b318fSShuhei Matsumoto 
116ca1b318fSShuhei Matsumoto static TAILQ_HEAD(, trid_entry) g_trid_list = TAILQ_HEAD_INITIALIZER(g_trid_list);
117ca1b318fSShuhei Matsumoto 
118ca1b318fSShuhei Matsumoto static void io_complete(void *ctx, const struct spdk_nvme_cpl *cpl);
119ca1b318fSShuhei Matsumoto 
120ca1b318fSShuhei Matsumoto static int
121ca1b318fSShuhei Matsumoto build_nvme_name(char *name, size_t length, struct spdk_nvme_ctrlr *ctrlr)
122ca1b318fSShuhei Matsumoto {
123ca1b318fSShuhei Matsumoto 	const struct spdk_nvme_transport_id *trid;
124ca1b318fSShuhei Matsumoto 	int res = 0;
125ca1b318fSShuhei Matsumoto 
126ca1b318fSShuhei Matsumoto 	trid = spdk_nvme_ctrlr_get_transport_id(ctrlr);
127ca1b318fSShuhei Matsumoto 
128ca1b318fSShuhei Matsumoto 	switch (trid->trtype) {
129ca1b318fSShuhei Matsumoto 	case SPDK_NVME_TRANSPORT_PCIE:
130ca1b318fSShuhei Matsumoto 		res = snprintf(name, length, "PCIE (%s)", trid->traddr);
131ca1b318fSShuhei Matsumoto 		break;
132ca1b318fSShuhei Matsumoto 	case SPDK_NVME_TRANSPORT_RDMA:
133ca1b318fSShuhei Matsumoto 		res = snprintf(name, length, "RDMA (addr:%s subnqn:%s)", trid->traddr, trid->subnqn);
134ca1b318fSShuhei Matsumoto 		break;
135ca1b318fSShuhei Matsumoto 	case SPDK_NVME_TRANSPORT_TCP:
136ca1b318fSShuhei Matsumoto 		res = snprintf(name, length, "TCP (addr:%s subnqn:%s)", trid->traddr, trid->subnqn);
1378bb7511fSChangpeng Liu 		break;
1388bb7511fSChangpeng Liu 	case SPDK_NVME_TRANSPORT_CUSTOM:
1398bb7511fSChangpeng Liu 		res = snprintf(name, length, "CUSTOM (%s)", trid->traddr);
140ca1b318fSShuhei Matsumoto 		break;
141ca1b318fSShuhei Matsumoto 
142ca1b318fSShuhei Matsumoto 	default:
143ca1b318fSShuhei Matsumoto 		fprintf(stderr, "Unknown transport type %d\n", trid->trtype);
144ca1b318fSShuhei Matsumoto 		break;
145ca1b318fSShuhei Matsumoto 	}
146ca1b318fSShuhei Matsumoto 	return res;
147ca1b318fSShuhei Matsumoto }
148ca1b318fSShuhei Matsumoto 
149ca1b318fSShuhei Matsumoto static void
150ca1b318fSShuhei Matsumoto build_nvme_ns_name(char *name, size_t length, struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
151ca1b318fSShuhei Matsumoto {
152ca1b318fSShuhei Matsumoto 	int res = 0;
153ca1b318fSShuhei Matsumoto 
154ca1b318fSShuhei Matsumoto 	res = build_nvme_name(name, length, ctrlr);
155ca1b318fSShuhei Matsumoto 	if (res > 0) {
156ca1b318fSShuhei Matsumoto 		snprintf(name + res, length - res, " NSID %u", nsid);
157ca1b318fSShuhei Matsumoto 	}
158ca1b318fSShuhei Matsumoto 
159ca1b318fSShuhei Matsumoto }
160ca1b318fSShuhei Matsumoto 
161ca1b318fSShuhei Matsumoto static void
162ca1b318fSShuhei Matsumoto register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
163ca1b318fSShuhei Matsumoto {
164ca1b318fSShuhei Matsumoto 	struct ns_entry *entry;
165ca1b318fSShuhei Matsumoto 	const struct spdk_nvme_ctrlr_data *cdata;
166ca1b318fSShuhei Matsumoto 	uint32_t max_xfer_size, entries, sector_size;
167ca1b318fSShuhei Matsumoto 	uint64_t ns_size;
168ca1b318fSShuhei Matsumoto 	struct spdk_nvme_io_qpair_opts opts;
169ca1b318fSShuhei Matsumoto 
170ca1b318fSShuhei Matsumoto 	cdata = spdk_nvme_ctrlr_get_data(ctrlr);
171ca1b318fSShuhei Matsumoto 
172ca1b318fSShuhei Matsumoto 	if (!spdk_nvme_ns_is_active(ns)) {
173ca1b318fSShuhei Matsumoto 		printf("Controller %-20.20s (%-20.20s): Skipping inactive NS %u\n",
174ca1b318fSShuhei Matsumoto 		       cdata->mn, cdata->sn,
175ca1b318fSShuhei Matsumoto 		       spdk_nvme_ns_get_id(ns));
176ca1b318fSShuhei Matsumoto 		g_warn = true;
177ca1b318fSShuhei Matsumoto 		return;
178ca1b318fSShuhei Matsumoto 	}
179ca1b318fSShuhei Matsumoto 
180ca1b318fSShuhei Matsumoto 	ns_size = spdk_nvme_ns_get_size(ns);
181ca1b318fSShuhei Matsumoto 	sector_size = spdk_nvme_ns_get_sector_size(ns);
182ca1b318fSShuhei Matsumoto 
183ca1b318fSShuhei Matsumoto 	if (ns_size < g_io_size_bytes || sector_size > g_io_size_bytes) {
184ca1b318fSShuhei Matsumoto 		printf("WARNING: controller %-20.20s (%-20.20s) ns %u has invalid "
185ca1b318fSShuhei Matsumoto 		       "ns size %" PRIu64 " / block size %u for I/O size %u\n",
186ca1b318fSShuhei Matsumoto 		       cdata->mn, cdata->sn, spdk_nvme_ns_get_id(ns),
187ca1b318fSShuhei Matsumoto 		       ns_size, spdk_nvme_ns_get_sector_size(ns), g_io_size_bytes);
188ca1b318fSShuhei Matsumoto 		g_warn = true;
189ca1b318fSShuhei Matsumoto 		return;
190ca1b318fSShuhei Matsumoto 	}
191ca1b318fSShuhei Matsumoto 
192ca1b318fSShuhei Matsumoto 	max_xfer_size = spdk_nvme_ns_get_max_io_xfer_size(ns);
193ca1b318fSShuhei Matsumoto 	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
194ca1b318fSShuhei Matsumoto 	/* The NVMe driver may add additional entries based on the
195ca1b318fSShuhei Matsumoto 	 * stripe size and maximum transfer size; we assume one more
196ca1b318fSShuhei Matsumoto 	 * entry is used for the stripe.
197ca1b318fSShuhei Matsumoto 	 */
198ca1b318fSShuhei Matsumoto 	entries = (g_io_size_bytes - 1) / max_xfer_size + 2;
199ca1b318fSShuhei Matsumoto 	if ((g_queue_depth * entries) > opts.io_queue_size) {
200ca1b318fSShuhei Matsumoto 		printf("controller IO queue size %u is less than required\n",
201ca1b318fSShuhei Matsumoto 		       opts.io_queue_size);
202ca1b318fSShuhei Matsumoto 		printf("Consider using a lower queue depth or a smaller IO size, because "
203ca1b318fSShuhei Matsumoto 		       "IO requests may be queued at the NVMe driver.\n");
204ca1b318fSShuhei Matsumoto 	}
205ca1b318fSShuhei Matsumoto 	/* For requests that have child requests, the parent request itself
206ca1b318fSShuhei Matsumoto 	 * also occupies one entry.
207ca1b318fSShuhei Matsumoto 	 */
208ca1b318fSShuhei Matsumoto 	entries += 1;
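	/* Worked example with the defaults: a 128 KiB I/O (-o 131072), assuming the
	 * controller's max transfer size is at least 128 KiB, gives
	 * entries = (131072 - 1) / 131072 + 2 = 2, plus 1 for the parent = 3,
	 * so the default queue depth of 128 reserves 128 * 3 = 384 request objects.
	 */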
209ca1b318fSShuhei Matsumoto 
210ca1b318fSShuhei Matsumoto 	entry = calloc(1, sizeof(struct ns_entry));
211ca1b318fSShuhei Matsumoto 	if (entry == NULL) {
212ca1b318fSShuhei Matsumoto 		perror("ns_entry calloc");
213ca1b318fSShuhei Matsumoto 		exit(1);
214ca1b318fSShuhei Matsumoto 	}
215ca1b318fSShuhei Matsumoto 
216ca1b318fSShuhei Matsumoto 	entry->ctrlr = ctrlr;
217ca1b318fSShuhei Matsumoto 	entry->ns = ns;
218ca1b318fSShuhei Matsumoto 	entry->num_io_requests = g_queue_depth * entries;
219ca1b318fSShuhei Matsumoto 
220ca1b318fSShuhei Matsumoto 	entry->size_in_ios = ns_size / g_io_size_bytes;
221ca1b318fSShuhei Matsumoto 	entry->io_size_blocks = g_io_size_bytes / sector_size;
222ca1b318fSShuhei Matsumoto 
223ca1b318fSShuhei Matsumoto 	entry->block_size = spdk_nvme_ns_get_sector_size(ns);
224ca1b318fSShuhei Matsumoto 
225ca1b318fSShuhei Matsumoto 	if (g_max_io_size_blocks < entry->io_size_blocks) {
226ca1b318fSShuhei Matsumoto 		g_max_io_size_blocks = entry->io_size_blocks;
227ca1b318fSShuhei Matsumoto 	}
228ca1b318fSShuhei Matsumoto 
229ca1b318fSShuhei Matsumoto 	build_nvme_ns_name(entry->name, sizeof(entry->name), ctrlr, spdk_nvme_ns_get_id(ns));
230ca1b318fSShuhei Matsumoto 
231ca1b318fSShuhei Matsumoto 	g_num_namespaces++;
2324c3fd228SShuhei Matsumoto 	TAILQ_INSERT_TAIL(&g_namespaces, entry, link);
233ca1b318fSShuhei Matsumoto }
234ca1b318fSShuhei Matsumoto 
235ca1b318fSShuhei Matsumoto static void
236ca1b318fSShuhei Matsumoto unregister_namespaces(void)
237ca1b318fSShuhei Matsumoto {
2384c3fd228SShuhei Matsumoto 	struct ns_entry *entry, *tmp;
239ca1b318fSShuhei Matsumoto 
2404c3fd228SShuhei Matsumoto 	TAILQ_FOREACH_SAFE(entry, &g_namespaces, link, tmp) {
2414c3fd228SShuhei Matsumoto 		TAILQ_REMOVE(&g_namespaces, entry, link);
242ca1b318fSShuhei Matsumoto 		free(entry);
243ca1b318fSShuhei Matsumoto 	}
244ca1b318fSShuhei Matsumoto }
245ca1b318fSShuhei Matsumoto 
246ca1b318fSShuhei Matsumoto static void
247ca1b318fSShuhei Matsumoto register_ctrlr(struct spdk_nvme_ctrlr *ctrlr, struct trid_entry *trid_entry)
248ca1b318fSShuhei Matsumoto {
249ca1b318fSShuhei Matsumoto 	struct spdk_nvme_ns *ns;
250ca1b318fSShuhei Matsumoto 	struct ctrlr_entry *entry = malloc(sizeof(struct ctrlr_entry));
251ca1b318fSShuhei Matsumoto 	uint32_t nsid;
252ca1b318fSShuhei Matsumoto 
253ca1b318fSShuhei Matsumoto 	if (entry == NULL) {
254ca1b318fSShuhei Matsumoto 		perror("ctrlr_entry malloc");
255ca1b318fSShuhei Matsumoto 		exit(1);
256ca1b318fSShuhei Matsumoto 	}
257ca1b318fSShuhei Matsumoto 
258ca1b318fSShuhei Matsumoto 	build_nvme_name(entry->name, sizeof(entry->name), ctrlr);
259ca1b318fSShuhei Matsumoto 
260ca1b318fSShuhei Matsumoto 	entry->ctrlr = ctrlr;
261ca1b318fSShuhei Matsumoto 	entry->trtype = trid_entry->trid.trtype;
2624c3fd228SShuhei Matsumoto 	TAILQ_INSERT_TAIL(&g_controllers, entry, link);
263ca1b318fSShuhei Matsumoto 
264ca1b318fSShuhei Matsumoto 	if (trid_entry->nsid == 0) {
265ca1b318fSShuhei Matsumoto 		for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
266ca1b318fSShuhei Matsumoto 		     nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
267ca1b318fSShuhei Matsumoto 			ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
268ca1b318fSShuhei Matsumoto 			if (ns == NULL) {
269ca1b318fSShuhei Matsumoto 				continue;
270ca1b318fSShuhei Matsumoto 			}
271ca1b318fSShuhei Matsumoto 			register_ns(ctrlr, ns);
272ca1b318fSShuhei Matsumoto 		}
273ca1b318fSShuhei Matsumoto 	} else {
274ca1b318fSShuhei Matsumoto 		ns = spdk_nvme_ctrlr_get_ns(ctrlr, trid_entry->nsid);
275ca1b318fSShuhei Matsumoto 		if (!ns) {
276ca1b318fSShuhei Matsumoto 			fprintf(stderr, "Namespace does not exist.\n");
277ca1b318fSShuhei Matsumoto 			exit(1);
278ca1b318fSShuhei Matsumoto 		}
279ca1b318fSShuhei Matsumoto 
280ca1b318fSShuhei Matsumoto 		register_ns(ctrlr, ns);
281ca1b318fSShuhei Matsumoto 	}
282ca1b318fSShuhei Matsumoto }
283ca1b318fSShuhei Matsumoto 
284ca1b318fSShuhei Matsumoto static void
285ca1b318fSShuhei Matsumoto abort_complete(void *ctx, const struct spdk_nvme_cpl *cpl)
286ca1b318fSShuhei Matsumoto {
287ca1b318fSShuhei Matsumoto 	struct ctrlr_worker_ctx	*ctrlr_ctx = ctx;
288ca1b318fSShuhei Matsumoto 
289ca1b318fSShuhei Matsumoto 	ctrlr_ctx->current_queue_depth--;
290ca1b318fSShuhei Matsumoto 	if (spdk_unlikely(spdk_nvme_cpl_is_error(cpl))) {
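	/* Per the NVMe spec, the Abort command reports its outcome in completion
	 * dword 0 bit 0: 0 means the targeted command was aborted, 1 means it was
	 * not.  An error status on the Abort command itself is counted as
	 * abort_failed.
	 */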
291ca1b318fSShuhei Matsumoto 		ctrlr_ctx->abort_failed++;
292ca1b318fSShuhei Matsumoto 	} else if ((cpl->cdw0 & 0x1) == 0) {
293ca1b318fSShuhei Matsumoto 		ctrlr_ctx->successful_abort++;
294ca1b318fSShuhei Matsumoto 	} else {
295ca1b318fSShuhei Matsumoto 		ctrlr_ctx->unsuccessful_abort++;
296ca1b318fSShuhei Matsumoto 	}
297ca1b318fSShuhei Matsumoto }
298ca1b318fSShuhei Matsumoto 
299ca1b318fSShuhei Matsumoto static void
300ca1b318fSShuhei Matsumoto abort_task(struct perf_task *task)
301ca1b318fSShuhei Matsumoto {
302ca1b318fSShuhei Matsumoto 	struct ns_worker_ctx	*ns_ctx = task->ns_ctx;
303ca1b318fSShuhei Matsumoto 	struct ctrlr_worker_ctx	*ctrlr_ctx = ns_ctx->ctrlr_ctx;
304ca1b318fSShuhei Matsumoto 	int			rc;
305ca1b318fSShuhei Matsumoto 
306ca1b318fSShuhei Matsumoto 	/* Hold mutex to guard ctrlr_ctx->current_queue_depth. */
307ca1b318fSShuhei Matsumoto 	pthread_mutex_lock(&ctrlr_ctx->mutex);
308ca1b318fSShuhei Matsumoto 
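	/* spdk_nvme_ctrlr_cmd_abort_ext() submits an Abort admin command targeting
	 * the outstanding request on ns_ctx->qpair whose callback argument matches
	 * `task`, i.e. the I/O that was just submitted in submit_single_io().
	 */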
309ca1b318fSShuhei Matsumoto 	rc = spdk_nvme_ctrlr_cmd_abort_ext(ctrlr_ctx->ctrlr, ns_ctx->qpair, task, abort_complete,
310ca1b318fSShuhei Matsumoto 					   ctrlr_ctx);
311ca1b318fSShuhei Matsumoto 
312ca1b318fSShuhei Matsumoto 	if (spdk_unlikely(rc != 0)) {
313ca1b318fSShuhei Matsumoto 		ctrlr_ctx->abort_submit_failed++;
314ca1b318fSShuhei Matsumoto 	} else {
315ca1b318fSShuhei Matsumoto 		ctrlr_ctx->current_queue_depth++;
316ca1b318fSShuhei Matsumoto 		ctrlr_ctx->abort_submitted++;
317ca1b318fSShuhei Matsumoto 	}
318ca1b318fSShuhei Matsumoto 
319ca1b318fSShuhei Matsumoto 	pthread_mutex_unlock(&ctrlr_ctx->mutex);
320ca1b318fSShuhei Matsumoto }
321ca1b318fSShuhei Matsumoto 
322ca1b318fSShuhei Matsumoto static __thread unsigned int seed = 0;
323ca1b318fSShuhei Matsumoto 
324ca1b318fSShuhei Matsumoto static inline void
325ca1b318fSShuhei Matsumoto submit_single_io(struct perf_task *task)
326ca1b318fSShuhei Matsumoto {
327ca1b318fSShuhei Matsumoto 	uint64_t		offset_in_ios, lba;
328ca1b318fSShuhei Matsumoto 	int			rc;
329ca1b318fSShuhei Matsumoto 	struct ns_worker_ctx	*ns_ctx = task->ns_ctx;
330ca1b318fSShuhei Matsumoto 	struct ns_entry		*entry = ns_ctx->entry;
331ca1b318fSShuhei Matsumoto 
332ca1b318fSShuhei Matsumoto 	if (g_is_random) {
333ca1b318fSShuhei Matsumoto 		offset_in_ios = rand_r(&seed) % entry->size_in_ios;
334ca1b318fSShuhei Matsumoto 	} else {
335ca1b318fSShuhei Matsumoto 		offset_in_ios = ns_ctx->offset_in_ios++;
336ca1b318fSShuhei Matsumoto 		if (ns_ctx->offset_in_ios == entry->size_in_ios) {
337ca1b318fSShuhei Matsumoto 			ns_ctx->offset_in_ios = 0;
338ca1b318fSShuhei Matsumoto 		}
339ca1b318fSShuhei Matsumoto 	}
340ca1b318fSShuhei Matsumoto 
341ca1b318fSShuhei Matsumoto 	lba = offset_in_ios * entry->io_size_blocks;
342ca1b318fSShuhei Matsumoto 
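	/* Issue a read with probability g_rw_percentage (-M): 100 means all reads,
	 * 0 means all writes.
	 */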
343ca1b318fSShuhei Matsumoto 	if ((g_rw_percentage == 100) ||
344ca1b318fSShuhei Matsumoto 	    (g_rw_percentage != 0 && (rand_r(&seed) % 100) < g_rw_percentage)) {
345ca1b318fSShuhei Matsumoto 		rc = spdk_nvme_ns_cmd_read(entry->ns, ns_ctx->qpair, task->buf,
346ca1b318fSShuhei Matsumoto 					   lba, entry->io_size_blocks, io_complete, task, 0);
347ca1b318fSShuhei Matsumoto 	} else {
348ca1b318fSShuhei Matsumoto 		rc = spdk_nvme_ns_cmd_write(entry->ns, ns_ctx->qpair, task->buf,
349ca1b318fSShuhei Matsumoto 					    lba, entry->io_size_blocks, io_complete, task, 0);
350ca1b318fSShuhei Matsumoto 	}
351ca1b318fSShuhei Matsumoto 
352ca1b318fSShuhei Matsumoto 	if (spdk_unlikely(rc != 0)) {
353ca1b318fSShuhei Matsumoto 		fprintf(stderr, "I/O submission failed\n");
354ca1b318fSShuhei Matsumoto 	} else {
355ca1b318fSShuhei Matsumoto 		ns_ctx->current_queue_depth++;
356ca1b318fSShuhei Matsumoto 		ns_ctx->io_submitted++;
357ca1b318fSShuhei Matsumoto 
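		/* Every g_abort_interval-th successfully submitted I/O (-a, default 1)
		 * is immediately targeted by an Abort admin command.
		 */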
358ca1b318fSShuhei Matsumoto 		if ((ns_ctx->io_submitted % g_abort_interval) == 0) {
359ca1b318fSShuhei Matsumoto 			abort_task(task);
360ca1b318fSShuhei Matsumoto 		}
361ca1b318fSShuhei Matsumoto 	}
362ca1b318fSShuhei Matsumoto 
363ca1b318fSShuhei Matsumoto }
364ca1b318fSShuhei Matsumoto 
365ca1b318fSShuhei Matsumoto static void
366ca1b318fSShuhei Matsumoto io_complete(void *ctx, const struct spdk_nvme_cpl *cpl)
367ca1b318fSShuhei Matsumoto {
368ca1b318fSShuhei Matsumoto 	struct perf_task	*task = ctx;
369ca1b318fSShuhei Matsumoto 	struct ns_worker_ctx	*ns_ctx = task->ns_ctx;
370ca1b318fSShuhei Matsumoto 
371ca1b318fSShuhei Matsumoto 	ns_ctx->current_queue_depth--;
372ca1b318fSShuhei Matsumoto 	if (spdk_unlikely(spdk_nvme_cpl_is_error(cpl))) {
373ca1b318fSShuhei Matsumoto 		ns_ctx->io_failed++;
374ca1b318fSShuhei Matsumoto 	} else {
375ca1b318fSShuhei Matsumoto 		ns_ctx->io_completed++;
376ca1b318fSShuhei Matsumoto 	}
377ca1b318fSShuhei Matsumoto 
378ca1b318fSShuhei Matsumoto 	/* is_draining indicates when time has expired for the test run and we are
379ca1b318fSShuhei Matsumoto 	 * just waiting for the previously submitted I/O to complete. In this case,
380ca1b318fSShuhei Matsumoto 	 * do not submit a new I/O to replace the one just completed.
381ca1b318fSShuhei Matsumoto 	 */
382ca1b318fSShuhei Matsumoto 	if (spdk_unlikely(ns_ctx->is_draining)) {
383ca1b318fSShuhei Matsumoto 		spdk_dma_free(task->buf);
384ca1b318fSShuhei Matsumoto 		free(task);
385ca1b318fSShuhei Matsumoto 	} else {
386ca1b318fSShuhei Matsumoto 		submit_single_io(task);
387ca1b318fSShuhei Matsumoto 	}
388ca1b318fSShuhei Matsumoto }
389ca1b318fSShuhei Matsumoto 
390ca1b318fSShuhei Matsumoto static struct perf_task *
391ca1b318fSShuhei Matsumoto allocate_task(struct ns_worker_ctx *ns_ctx)
392ca1b318fSShuhei Matsumoto {
393ca1b318fSShuhei Matsumoto 	struct perf_task *task;
394ca1b318fSShuhei Matsumoto 
395ca1b318fSShuhei Matsumoto 	task = calloc(1, sizeof(*task));
396ca1b318fSShuhei Matsumoto 	if (task == NULL) {
397ca1b318fSShuhei Matsumoto 		fprintf(stderr, "Failed to allocate task\n");
398ca1b318fSShuhei Matsumoto 		exit(1);
399ca1b318fSShuhei Matsumoto 	}
400ca1b318fSShuhei Matsumoto 
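	/* The payload must come from SPDK's DMA-safe memory; 0x200 requests
	 * 512-byte alignment.
	 */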
401ca1b318fSShuhei Matsumoto 	task->buf = spdk_dma_zmalloc(g_io_size_bytes, 0x200, NULL);
402ca1b318fSShuhei Matsumoto 	if (task->buf == NULL) {
403ca1b318fSShuhei Matsumoto 		free(task);
404ca1b318fSShuhei Matsumoto 		fprintf(stderr, "Failed to allocate task->buf\n");
405ca1b318fSShuhei Matsumoto 		exit(1);
406ca1b318fSShuhei Matsumoto 	}
407ca1b318fSShuhei Matsumoto 
408ca1b318fSShuhei Matsumoto 	task->ns_ctx = ns_ctx;
409ca1b318fSShuhei Matsumoto 
410ca1b318fSShuhei Matsumoto 	return task;
411ca1b318fSShuhei Matsumoto }
412ca1b318fSShuhei Matsumoto 
413ca1b318fSShuhei Matsumoto static void
414ca1b318fSShuhei Matsumoto submit_io(struct ns_worker_ctx *ns_ctx, int queue_depth)
415ca1b318fSShuhei Matsumoto {
416ca1b318fSShuhei Matsumoto 	struct perf_task *task;
417ca1b318fSShuhei Matsumoto 
418ca1b318fSShuhei Matsumoto 	while (queue_depth-- > 0) {
419ca1b318fSShuhei Matsumoto 		task = allocate_task(ns_ctx);
420ca1b318fSShuhei Matsumoto 		submit_single_io(task);
421ca1b318fSShuhei Matsumoto 	}
422ca1b318fSShuhei Matsumoto }
423ca1b318fSShuhei Matsumoto 
424ca1b318fSShuhei Matsumoto static int
425ca1b318fSShuhei Matsumoto work_fn(void *arg)
426ca1b318fSShuhei Matsumoto {
427ca1b318fSShuhei Matsumoto 	struct worker_thread *worker = (struct worker_thread *)arg;
428ca1b318fSShuhei Matsumoto 	struct ns_worker_ctx *ns_ctx;
429ca1b318fSShuhei Matsumoto 	struct ctrlr_worker_ctx *ctrlr_ctx;
430ca1b318fSShuhei Matsumoto 	struct ns_entry *ns_entry;
431ca1b318fSShuhei Matsumoto 	struct spdk_nvme_io_qpair_opts opts;
432ca1b318fSShuhei Matsumoto 	uint64_t tsc_end;
433ca1b318fSShuhei Matsumoto 	uint32_t unfinished_ctx;
43413d31231SKonrad Sztyber 	int rc = 0;
435ca1b318fSShuhei Matsumoto 
436ca1b318fSShuhei Matsumoto 	/* Allocate queue pair for each namespace. */
4374c3fd228SShuhei Matsumoto 	TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
438ca1b318fSShuhei Matsumoto 		ns_entry = ns_ctx->entry;
439ca1b318fSShuhei Matsumoto 
440ca1b318fSShuhei Matsumoto 		spdk_nvme_ctrlr_get_default_io_qpair_opts(ns_entry->ctrlr, &opts, sizeof(opts));
441ca1b318fSShuhei Matsumoto 		if (opts.io_queue_requests < ns_entry->num_io_requests) {
442ca1b318fSShuhei Matsumoto 			opts.io_queue_requests = ns_entry->num_io_requests;
443ca1b318fSShuhei Matsumoto 		}
444ca1b318fSShuhei Matsumoto 
445ca1b318fSShuhei Matsumoto 		ns_ctx->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_entry->ctrlr, &opts, sizeof(opts));
446ca1b318fSShuhei Matsumoto 		if (ns_ctx->qpair == NULL) {
447ca1b318fSShuhei Matsumoto 			fprintf(stderr, "spdk_nvme_ctrlr_alloc_io_qpair failed\n");
4489c04e6d8SKonrad Sztyber 			worker->status = -ENOMEM;
449924a61bfSKonrad Sztyber 			goto out;
450ca1b318fSShuhei Matsumoto 		}
451ca1b318fSShuhei Matsumoto 	}
452ca1b318fSShuhei Matsumoto 
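	/* Convert the run time (-t seconds) into TSC ticks so the hot loop only
	 * has to compare tick counters.
	 */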
453ca1b318fSShuhei Matsumoto 	tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;
454ca1b318fSShuhei Matsumoto 
455ca1b318fSShuhei Matsumoto 	/* Submit initial I/O for each namespace. */
4564c3fd228SShuhei Matsumoto 	TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
457ca1b318fSShuhei Matsumoto 		submit_io(ns_ctx, g_queue_depth);
458ca1b318fSShuhei Matsumoto 	}
459ca1b318fSShuhei Matsumoto 
460ca1b318fSShuhei Matsumoto 	while (1) {
4614c3fd228SShuhei Matsumoto 		TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
46213d31231SKonrad Sztyber 			rc = spdk_nvme_qpair_process_completions(ns_ctx->qpair, 0);
46313d31231SKonrad Sztyber 			if (rc < 0) {
46413d31231SKonrad Sztyber 				fprintf(stderr, "spdk_nvme_qpair_process_completions returned "
46513d31231SKonrad Sztyber 					"%d\n", rc);
46613d31231SKonrad Sztyber 				worker->status = rc;
46713d31231SKonrad Sztyber 				goto out;
46813d31231SKonrad Sztyber 			}
469ca1b318fSShuhei Matsumoto 		}
470ca1b318fSShuhei Matsumoto 
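		/* Controller contexts are attached only to the main core's worker
		 * (see associate_main_worker_with_ctrlr()), so only that core polls
		 * the admin queue for Abort completions.
		 */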
47187b21afdSJim Harris 		if (worker->lcore == g_main_core) {
4724c3fd228SShuhei Matsumoto 			TAILQ_FOREACH(ctrlr_ctx, &worker->ctrlr_ctx, link) {
473ca1b318fSShuhei Matsumoto 				/* Hold mutex to guard ctrlr_ctx->current_queue_depth. */
474ca1b318fSShuhei Matsumoto 				pthread_mutex_lock(&ctrlr_ctx->mutex);
47513d31231SKonrad Sztyber 				rc = spdk_nvme_ctrlr_process_admin_completions(ctrlr_ctx->ctrlr);
476ca1b318fSShuhei Matsumoto 				pthread_mutex_unlock(&ctrlr_ctx->mutex);
47713d31231SKonrad Sztyber 				if (rc < 0) {
47813d31231SKonrad Sztyber 					fprintf(stderr, "spdk_nvme_ctrlr_process_admin_completions "
47913d31231SKonrad Sztyber 						"returned %d\n", rc);
48013d31231SKonrad Sztyber 					worker->status = rc;
48113d31231SKonrad Sztyber 					goto out;
48213d31231SKonrad Sztyber 				}
483ca1b318fSShuhei Matsumoto 			}
484ca1b318fSShuhei Matsumoto 		}
485ca1b318fSShuhei Matsumoto 
486ca1b318fSShuhei Matsumoto 		if (spdk_get_ticks() > tsc_end) {
487ca1b318fSShuhei Matsumoto 			break;
488ca1b318fSShuhei Matsumoto 		}
489ca1b318fSShuhei Matsumoto 	}
490ca1b318fSShuhei Matsumoto 
491ca1b318fSShuhei Matsumoto 	do {
492ca1b318fSShuhei Matsumoto 		unfinished_ctx = 0;
493ca1b318fSShuhei Matsumoto 
4944c3fd228SShuhei Matsumoto 		TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
495ca1b318fSShuhei Matsumoto 			if (!ns_ctx->is_draining) {
496ca1b318fSShuhei Matsumoto 				ns_ctx->is_draining = true;
497ca1b318fSShuhei Matsumoto 			}
498ca1b318fSShuhei Matsumoto 			if (ns_ctx->current_queue_depth > 0) {
49913d31231SKonrad Sztyber 				rc = spdk_nvme_qpair_process_completions(ns_ctx->qpair, 0);
50013d31231SKonrad Sztyber 				if (rc < 0) {
50113d31231SKonrad Sztyber 					fprintf(stderr, "spdk_nvme_qpair_process_completions "
50213d31231SKonrad Sztyber 						"returned %d\n", rc);
50313d31231SKonrad Sztyber 					worker->status = rc;
50413d31231SKonrad Sztyber 					goto out;
50513d31231SKonrad Sztyber 				}
506ca1b318fSShuhei Matsumoto 				unfinished_ctx++;
507ca1b318fSShuhei Matsumoto 			}
508ca1b318fSShuhei Matsumoto 		}
509ca1b318fSShuhei Matsumoto 	} while (unfinished_ctx > 0);
510ca1b318fSShuhei Matsumoto 
51187b21afdSJim Harris 	if (worker->lcore == g_main_core) {
512ca1b318fSShuhei Matsumoto 		do {
513ca1b318fSShuhei Matsumoto 			unfinished_ctx = 0;
514ca1b318fSShuhei Matsumoto 
5154c3fd228SShuhei Matsumoto 			TAILQ_FOREACH(ctrlr_ctx, &worker->ctrlr_ctx, link) {
516ca1b318fSShuhei Matsumoto 				pthread_mutex_lock(&ctrlr_ctx->mutex);
517ca1b318fSShuhei Matsumoto 				if (ctrlr_ctx->current_queue_depth > 0) {
51813d31231SKonrad Sztyber 					rc = spdk_nvme_ctrlr_process_admin_completions(ctrlr_ctx->ctrlr);
519ca1b318fSShuhei Matsumoto 					unfinished_ctx++;
520ca1b318fSShuhei Matsumoto 				}
521ca1b318fSShuhei Matsumoto 				pthread_mutex_unlock(&ctrlr_ctx->mutex);
52213d31231SKonrad Sztyber 				if (rc < 0) {
52313d31231SKonrad Sztyber 					fprintf(stderr, "spdk_nvme_ctrlr_process_admin_completions "
52413d31231SKonrad Sztyber 						"returned %d\n", rc);
52513d31231SKonrad Sztyber 					worker->status = rc;
52613d31231SKonrad Sztyber 					goto out;
52713d31231SKonrad Sztyber 				}
528ca1b318fSShuhei Matsumoto 			}
529ca1b318fSShuhei Matsumoto 		} while (unfinished_ctx > 0);
530ca1b318fSShuhei Matsumoto 	}
531924a61bfSKonrad Sztyber out:
532924a61bfSKonrad Sztyber 	TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
53313d31231SKonrad Sztyber 		/* Make sure we don't submit any IOs at this point */
53413d31231SKonrad Sztyber 		ns_ctx->is_draining = true;
535924a61bfSKonrad Sztyber 		spdk_nvme_ctrlr_free_io_qpair(ns_ctx->qpair);
536924a61bfSKonrad Sztyber 	}
537ca1b318fSShuhei Matsumoto 
538924a61bfSKonrad Sztyber 	return worker->status != 0;
539ca1b318fSShuhei Matsumoto }
540ca1b318fSShuhei Matsumoto 
541ca1b318fSShuhei Matsumoto static void
542ca1b318fSShuhei Matsumoto usage(char *program_name)
543ca1b318fSShuhei Matsumoto {
544ca1b318fSShuhei Matsumoto 	printf("%s options", program_name);
545ca1b318fSShuhei Matsumoto 
546ca1b318fSShuhei Matsumoto 	printf("\n");
547ca1b318fSShuhei Matsumoto 	printf("\t[-q io depth]\n");
548ca1b318fSShuhei Matsumoto 	printf("\t[-o io size in bytes]\n");
549ca1b318fSShuhei Matsumoto 	printf("\t[-w io pattern type, must be one of\n");
550ca1b318fSShuhei Matsumoto 	printf("\t\t(read, write, randread, randwrite, rw, randrw)]\n");
551ca1b318fSShuhei Matsumoto 	printf("\t[-M rwmixread (100 for reads, 0 for writes)]\n");
552ca1b318fSShuhei Matsumoto 	printf("\t[-t time in seconds]\n");
553ca1b318fSShuhei Matsumoto 	printf("\t[-c core mask for I/O submission/completion.]\n");
554ca1b318fSShuhei Matsumoto 	printf("\t\t(default: 1)\n");
555ca1b318fSShuhei Matsumoto 	printf("\t[-r Transport ID for local PCIe NVMe or NVMeoF]\n");
556ca1b318fSShuhei Matsumoto 	printf("\t Format: 'key:value [key:value] ...'\n");
557ca1b318fSShuhei Matsumoto 	printf("\t Keys:\n");
558ca1b318fSShuhei Matsumoto 	printf("\t  trtype      Transport type (e.g. PCIe, RDMA)\n");
559ca1b318fSShuhei Matsumoto 	printf("\t  adrfam      Address family (e.g. IPv4, IPv6)\n");
560ca1b318fSShuhei Matsumoto 	printf("\t  traddr      Transport address (e.g. 0000:04:00.0 for PCIe or 192.168.100.8 for RDMA)\n");
561ca1b318fSShuhei Matsumoto 	printf("\t  trsvcid     Transport service identifier (e.g. 4420)\n");
562ca1b318fSShuhei Matsumoto 	printf("\t  subnqn      Subsystem NQN (default: %s)\n", SPDK_NVMF_DISCOVERY_NQN);
563ca1b318fSShuhei Matsumoto 	printf("\t Example: -r 'trtype:PCIe traddr:0000:04:00.0' for PCIe or\n");
564ca1b318fSShuhei Matsumoto 	printf("\t          -r 'trtype:RDMA adrfam:IPv4 traddr:192.168.100.8 trsvcid:4420' for NVMeoF\n");
565ca1b318fSShuhei Matsumoto 	printf("\t[-s DPDK huge memory size in MB.]\n");
566ca1b318fSShuhei Matsumoto 	printf("\t[-i shared memory group ID]\n");
567ca1b318fSShuhei Matsumoto 	printf("\t[-a abort interval (submit an abort every N I/Os, default: 1)]\n");
5681826245aSSarvesh Lanke 	printf("\t[--no-huge SPDK is run without hugepages]\n");
569ca1b318fSShuhei Matsumoto 	printf("\t");
570ca1b318fSShuhei Matsumoto 	spdk_log_usage(stdout, "-T");
571ca1b318fSShuhei Matsumoto #ifdef DEBUG
572ca1b318fSShuhei Matsumoto 	printf("\t[-G enable debug logging]\n");
573ca1b318fSShuhei Matsumoto #else
574809ae055Swanghailiangx 	printf("\t[-G enable debug logging (flag disabled, must reconfigure with --enable-debug)]\n");
575ca1b318fSShuhei Matsumoto #endif
576bace0549SKrzysztof Karas 	printf("\t[-l log level]\n");
577bace0549SKrzysztof Karas 	printf("\t Available log levels:\n");
578bace0549SKrzysztof Karas 	printf("\t  disabled, error, warning, notice, info, debug\n");
579ca1b318fSShuhei Matsumoto }
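
/*
 * Example invocation (hypothetical; adjust the binary path and transport ID to
 * your setup), mixing 50% reads and writes at queue depth 128 and aborting
 * every 4th I/O:
 *
 *   ./build/examples/abort -q 128 -o 4096 -w randrw -M 50 -t 10 -a 4 \
 *       -r 'trtype:PCIe traddr:0000:04:00.0'
 */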
580ca1b318fSShuhei Matsumoto 
581ca1b318fSShuhei Matsumoto static void
582ca1b318fSShuhei Matsumoto unregister_trids(void)
583ca1b318fSShuhei Matsumoto {
584ca1b318fSShuhei Matsumoto 	struct trid_entry *trid_entry, *tmp;
585ca1b318fSShuhei Matsumoto 
586ca1b318fSShuhei Matsumoto 	TAILQ_FOREACH_SAFE(trid_entry, &g_trid_list, tailq, tmp) {
587ca1b318fSShuhei Matsumoto 		TAILQ_REMOVE(&g_trid_list, trid_entry, tailq);
588ca1b318fSShuhei Matsumoto 		free(trid_entry);
589ca1b318fSShuhei Matsumoto 	}
590ca1b318fSShuhei Matsumoto }
591ca1b318fSShuhei Matsumoto 
592ca1b318fSShuhei Matsumoto static int
593ca1b318fSShuhei Matsumoto add_trid(const char *trid_str)
594ca1b318fSShuhei Matsumoto {
595ca1b318fSShuhei Matsumoto 	struct trid_entry *trid_entry;
596ca1b318fSShuhei Matsumoto 	struct spdk_nvme_transport_id *trid;
597ca1b318fSShuhei Matsumoto 	char *ns;
598ca1b318fSShuhei Matsumoto 
599ca1b318fSShuhei Matsumoto 	trid_entry = calloc(1, sizeof(*trid_entry));
600ca1b318fSShuhei Matsumoto 	if (trid_entry == NULL) {
601ca1b318fSShuhei Matsumoto 		return -1;
602ca1b318fSShuhei Matsumoto 	}
603ca1b318fSShuhei Matsumoto 
604ca1b318fSShuhei Matsumoto 	trid = &trid_entry->trid;
605ca1b318fSShuhei Matsumoto 	trid->trtype = SPDK_NVME_TRANSPORT_PCIE;
606ca1b318fSShuhei Matsumoto 	snprintf(trid->subnqn, sizeof(trid->subnqn), "%s", SPDK_NVMF_DISCOVERY_NQN);
607ca1b318fSShuhei Matsumoto 
608ca1b318fSShuhei Matsumoto 	if (spdk_nvme_transport_id_parse(trid, trid_str) != 0) {
609ca1b318fSShuhei Matsumoto 		fprintf(stderr, "Invalid transport ID format '%s'\n", trid_str);
610ca1b318fSShuhei Matsumoto 		free(trid_entry);
611ca1b318fSShuhei Matsumoto 		return 1;
612ca1b318fSShuhei Matsumoto 	}
613ca1b318fSShuhei Matsumoto 
614ca1b318fSShuhei Matsumoto 	spdk_nvme_transport_id_populate_trstring(trid,
615ca1b318fSShuhei Matsumoto 			spdk_nvme_transport_id_trtype_str(trid->trtype));
616ca1b318fSShuhei Matsumoto 
617ca1b318fSShuhei Matsumoto 	ns = strcasestr(trid_str, "ns:");
618ca1b318fSShuhei Matsumoto 	if (ns) {
619ca1b318fSShuhei Matsumoto 		char nsid_str[6]; /* 5 digits maximum in an nsid */
620ca1b318fSShuhei Matsumoto 		int len;
621ca1b318fSShuhei Matsumoto 		int nsid;
622ca1b318fSShuhei Matsumoto 
623ca1b318fSShuhei Matsumoto 		ns += 3;
624ca1b318fSShuhei Matsumoto 
625ca1b318fSShuhei Matsumoto 		len = strcspn(ns, " \t\n");
626ca1b318fSShuhei Matsumoto 		if (len > 5) {
627ca1b318fSShuhei Matsumoto 			fprintf(stderr, "NVMe namespace IDs must be 5 digits or less\n");
628ca1b318fSShuhei Matsumoto 			free(trid_entry);
629ca1b318fSShuhei Matsumoto 			return 1;
630ca1b318fSShuhei Matsumoto 		}
631ca1b318fSShuhei Matsumoto 
632ca1b318fSShuhei Matsumoto 		memcpy(nsid_str, ns, len);
633ca1b318fSShuhei Matsumoto 		nsid_str[len] = '\0';
634ca1b318fSShuhei Matsumoto 
635ca1b318fSShuhei Matsumoto 		nsid = spdk_strtol(nsid_str, 10);
636ca1b318fSShuhei Matsumoto 		if (nsid <= 0 || nsid > 65535) {
637ca1b318fSShuhei Matsumoto 			fprintf(stderr, "NVMe namespace IDs must be less than 65536 and greater than 0\n");
638ca1b318fSShuhei Matsumoto 			free(trid_entry);
639ca1b318fSShuhei Matsumoto 			return 1;
640ca1b318fSShuhei Matsumoto 		}
641ca1b318fSShuhei Matsumoto 
642ca1b318fSShuhei Matsumoto 		trid_entry->nsid = (uint16_t)nsid;
643ca1b318fSShuhei Matsumoto 	}
644ca1b318fSShuhei Matsumoto 
645ca1b318fSShuhei Matsumoto 	TAILQ_INSERT_TAIL(&g_trid_list, trid_entry, tailq);
646ca1b318fSShuhei Matsumoto 	return 0;
647ca1b318fSShuhei Matsumoto }
648ca1b318fSShuhei Matsumoto 
649ca1b318fSShuhei Matsumoto static int
650ca1b318fSShuhei Matsumoto parse_args(int argc, char **argv)
651ca1b318fSShuhei Matsumoto {
6521826245aSSarvesh Lanke 	int op, opt_index;
653ca1b318fSShuhei Matsumoto 	long int val;
654ca1b318fSShuhei Matsumoto 	int rc;
655ca1b318fSShuhei Matsumoto 
6561826245aSSarvesh Lanke 	while ((op = getopt_long(argc, argv, ABORT_GETOPT_STRING, g_abort_cmdline_opts,
6571826245aSSarvesh Lanke 				 &opt_index)) != -1) {
658ca1b318fSShuhei Matsumoto 		switch (op) {
659ca1b318fSShuhei Matsumoto 		case 'a':
660ca1b318fSShuhei Matsumoto 		case 'i':
661ca1b318fSShuhei Matsumoto 		case 'o':
662ca1b318fSShuhei Matsumoto 		case 'q':
663ca1b318fSShuhei Matsumoto 		case 's':
664ca1b318fSShuhei Matsumoto 		case 't':
665ca1b318fSShuhei Matsumoto 		case 'M':
666ca1b318fSShuhei Matsumoto 			val = spdk_strtol(optarg, 10);
667ca1b318fSShuhei Matsumoto 			if (val < 0) {
668ca1b318fSShuhei Matsumoto 				fprintf(stderr, "Converting a string to integer failed\n");
669ca1b318fSShuhei Matsumoto 				return val;
670ca1b318fSShuhei Matsumoto 			}
671ca1b318fSShuhei Matsumoto 			switch (op) {
672ca1b318fSShuhei Matsumoto 			case 'a':
673ca1b318fSShuhei Matsumoto 				g_abort_interval = val;
674ca1b318fSShuhei Matsumoto 				break;
675ca1b318fSShuhei Matsumoto 			case 'i':
676ca1b318fSShuhei Matsumoto 				g_shm_id = val;
677ca1b318fSShuhei Matsumoto 				break;
678ca1b318fSShuhei Matsumoto 			case 'o':
679ca1b318fSShuhei Matsumoto 				g_io_size_bytes = val;
680ca1b318fSShuhei Matsumoto 				break;
681ca1b318fSShuhei Matsumoto 			case 'q':
682ca1b318fSShuhei Matsumoto 				g_queue_depth = val;
683ca1b318fSShuhei Matsumoto 				break;
684ca1b318fSShuhei Matsumoto 			case 's':
685ca1b318fSShuhei Matsumoto 				g_dpdk_mem = val;
686ca1b318fSShuhei Matsumoto 				break;
687ca1b318fSShuhei Matsumoto 			case 't':
688ca1b318fSShuhei Matsumoto 				g_time_in_sec = val;
689ca1b318fSShuhei Matsumoto 				break;
690ca1b318fSShuhei Matsumoto 			case 'M':
691ca1b318fSShuhei Matsumoto 				g_rw_percentage = val;
692ca1b318fSShuhei Matsumoto 				g_mix_specified = true;
693ca1b318fSShuhei Matsumoto 				break;
694ca1b318fSShuhei Matsumoto 			}
695ca1b318fSShuhei Matsumoto 			break;
696ca1b318fSShuhei Matsumoto 		case 'c':
697ca1b318fSShuhei Matsumoto 			g_core_mask = optarg;
698ca1b318fSShuhei Matsumoto 			break;
699ca1b318fSShuhei Matsumoto 		case 'r':
700ca1b318fSShuhei Matsumoto 			if (add_trid(optarg)) {
701ca1b318fSShuhei Matsumoto 				usage(argv[0]);
702ca1b318fSShuhei Matsumoto 				return 1;
703ca1b318fSShuhei Matsumoto 			}
704ca1b318fSShuhei Matsumoto 			break;
705ca1b318fSShuhei Matsumoto 		case 'w':
706ca1b318fSShuhei Matsumoto 			g_workload_type = optarg;
707ca1b318fSShuhei Matsumoto 			break;
708ca1b318fSShuhei Matsumoto 		case 'G':
709ca1b318fSShuhei Matsumoto #ifndef DEBUG
710ca1b318fSShuhei Matsumoto 			fprintf(stderr, "%s must be configured with --enable-debug for -G flag\n",
711ca1b318fSShuhei Matsumoto 				argv[0]);
712ca1b318fSShuhei Matsumoto 			usage(argv[0]);
713ca1b318fSShuhei Matsumoto 			return 1;
714ca1b318fSShuhei Matsumoto #else
715ca1b318fSShuhei Matsumoto 			spdk_log_set_flag("nvme");
716ca1b318fSShuhei Matsumoto 			spdk_log_set_print_level(SPDK_LOG_DEBUG);
717ca1b318fSShuhei Matsumoto 			break;
718ca1b318fSShuhei Matsumoto #endif
719ca1b318fSShuhei Matsumoto 		case 'T':
720ca1b318fSShuhei Matsumoto 			rc = spdk_log_set_flag(optarg);
721ca1b318fSShuhei Matsumoto 			if (rc < 0) {
722ca1b318fSShuhei Matsumoto 				fprintf(stderr, "unknown flag\n");
723ca1b318fSShuhei Matsumoto 				usage(argv[0]);
724ca1b318fSShuhei Matsumoto 				exit(EXIT_FAILURE);
725ca1b318fSShuhei Matsumoto 			}
7268a76c248STomasz Zawadzki #ifdef DEBUG
727ca1b318fSShuhei Matsumoto 			spdk_log_set_print_level(SPDK_LOG_DEBUG);
728ca1b318fSShuhei Matsumoto #endif
729ca1b318fSShuhei Matsumoto 			break;
730bace0549SKrzysztof Karas 		case 'l':
731bace0549SKrzysztof Karas 			if (!strcmp(optarg, "disabled")) {
732bace0549SKrzysztof Karas 				spdk_log_set_print_level(SPDK_LOG_DISABLED);
733bace0549SKrzysztof Karas 			} else if (!strcmp(optarg, "error")) {
734bace0549SKrzysztof Karas 				spdk_log_set_print_level(SPDK_LOG_ERROR);
735bace0549SKrzysztof Karas 			} else if (!strcmp(optarg, "warning")) {
736bace0549SKrzysztof Karas 				spdk_log_set_print_level(SPDK_LOG_WARN);
737bace0549SKrzysztof Karas 			} else if (!strcmp(optarg, "notice")) {
738bace0549SKrzysztof Karas 				spdk_log_set_print_level(SPDK_LOG_NOTICE);
739bace0549SKrzysztof Karas 			} else if (!strcmp(optarg, "info")) {
740bace0549SKrzysztof Karas 				spdk_log_set_print_level(SPDK_LOG_INFO);
741bace0549SKrzysztof Karas 			} else if (!strcmp(optarg, "debug")) {
742bace0549SKrzysztof Karas 				spdk_log_set_print_level(SPDK_LOG_DEBUG);
743bace0549SKrzysztof Karas 			} else {
744bace0549SKrzysztof Karas 				fprintf(stderr, "Unrecognized log level: %s\n", optarg);
745bace0549SKrzysztof Karas 				return 1;
746bace0549SKrzysztof Karas 			}
747bace0549SKrzysztof Karas 			break;
7481826245aSSarvesh Lanke 		case ABORT_NO_HUGE:
7491826245aSSarvesh Lanke 			g_no_hugepages = true;
7501826245aSSarvesh Lanke 			break;
751ca1b318fSShuhei Matsumoto 		default:
752ca1b318fSShuhei Matsumoto 			usage(argv[0]);
753ca1b318fSShuhei Matsumoto 			return 1;
754ca1b318fSShuhei Matsumoto 		}
755ca1b318fSShuhei Matsumoto 	}
756ca1b318fSShuhei Matsumoto 
757ca1b318fSShuhei Matsumoto 	if (!g_queue_depth) {
758ca1b318fSShuhei Matsumoto 		fprintf(stderr, "missing -q (queue size) operand\n");
759ca1b318fSShuhei Matsumoto 		usage(argv[0]);
760ca1b318fSShuhei Matsumoto 		return 1;
761ca1b318fSShuhei Matsumoto 	}
762ca1b318fSShuhei Matsumoto 	if (!g_io_size_bytes) {
763ca1b318fSShuhei Matsumoto 		fprintf(stderr, "missing -o (io size) operand\n");
764ca1b318fSShuhei Matsumoto 		usage(argv[0]);
765ca1b318fSShuhei Matsumoto 		return 1;
766ca1b318fSShuhei Matsumoto 	}
767ca1b318fSShuhei Matsumoto 	if (!g_workload_type) {
768ca1b318fSShuhei Matsumoto 		fprintf(stderr, "missing -w (io pattern type) operand\n");
769ca1b318fSShuhei Matsumoto 		usage(argv[0]);
770ca1b318fSShuhei Matsumoto 		return 1;
771ca1b318fSShuhei Matsumoto 	}
772ca1b318fSShuhei Matsumoto 
773ca1b318fSShuhei Matsumoto 	if (!g_time_in_sec) {
774ca1b318fSShuhei Matsumoto 		usage(argv[0]);
775ca1b318fSShuhei Matsumoto 		return 1;
776ca1b318fSShuhei Matsumoto 	}
777ca1b318fSShuhei Matsumoto 
778ca1b318fSShuhei Matsumoto 	if (strncmp(g_workload_type, "rand", 4) == 0) {
779ca1b318fSShuhei Matsumoto 		g_is_random = 1;
780ca1b318fSShuhei Matsumoto 		g_workload_type = &g_workload_type[4];
781ca1b318fSShuhei Matsumoto 	}
782ca1b318fSShuhei Matsumoto 
783ca1b318fSShuhei Matsumoto 	if (strcmp(g_workload_type, "read") == 0 || strcmp(g_workload_type, "write") == 0) {
784ca1b318fSShuhei Matsumoto 		g_rw_percentage = strcmp(g_workload_type, "read") == 0 ? 100 : 0;
785ca1b318fSShuhei Matsumoto 		if (g_mix_specified) {
786ca1b318fSShuhei Matsumoto 			fprintf(stderr, "Ignoring -M option... Please use the -M option"
787ca1b318fSShuhei Matsumoto 				" only with rw or randrw.\n");
788ca1b318fSShuhei Matsumoto 		}
789ca1b318fSShuhei Matsumoto 	} else if (strcmp(g_workload_type, "rw") == 0) {
790ca1b318fSShuhei Matsumoto 		if (g_rw_percentage < 0 || g_rw_percentage > 100) {
791ca1b318fSShuhei Matsumoto 			fprintf(stderr,
792ca1b318fSShuhei Matsumoto 				"-M must be specified with a value from 0 to 100 "
793ca1b318fSShuhei Matsumoto 				"for rw or randrw.\n");
794ca1b318fSShuhei Matsumoto 			return 1;
795ca1b318fSShuhei Matsumoto 		}
796ca1b318fSShuhei Matsumoto 	} else {
797ca1b318fSShuhei Matsumoto 		fprintf(stderr,
798ca1b318fSShuhei Matsumoto 			"io pattern type must be one of\n"
799ca1b318fSShuhei Matsumoto 			"(read, write, randread, randwrite, rw, randrw)\n");
800ca1b318fSShuhei Matsumoto 		return 1;
801ca1b318fSShuhei Matsumoto 	}
802ca1b318fSShuhei Matsumoto 
803ca1b318fSShuhei Matsumoto 	if (TAILQ_EMPTY(&g_trid_list)) {
804ca1b318fSShuhei Matsumoto 		/* If no transport IDs specified, default to enumerating all local PCIe devices */
805ca1b318fSShuhei Matsumoto 		add_trid("trtype:PCIe");
806ca1b318fSShuhei Matsumoto 	} else {
807ca1b318fSShuhei Matsumoto 		struct trid_entry *trid_entry, *trid_entry_tmp;
808ca1b318fSShuhei Matsumoto 
809ca1b318fSShuhei Matsumoto 		g_no_pci = true;
810ca1b318fSShuhei Matsumoto 		/* check whether there is local PCIe type */
811ca1b318fSShuhei Matsumoto 		TAILQ_FOREACH_SAFE(trid_entry, &g_trid_list, tailq, trid_entry_tmp) {
812ca1b318fSShuhei Matsumoto 			if (trid_entry->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
813ca1b318fSShuhei Matsumoto 				g_no_pci = false;
814ca1b318fSShuhei Matsumoto 				break;
815ca1b318fSShuhei Matsumoto 			}
816ca1b318fSShuhei Matsumoto 		}
817ca1b318fSShuhei Matsumoto 	}
818ca1b318fSShuhei Matsumoto 
819ca1b318fSShuhei Matsumoto 	return 0;
820ca1b318fSShuhei Matsumoto }
821ca1b318fSShuhei Matsumoto 
822ca1b318fSShuhei Matsumoto static int
823ca1b318fSShuhei Matsumoto register_workers(void)
824ca1b318fSShuhei Matsumoto {
825ca1b318fSShuhei Matsumoto 	uint32_t i;
826ca1b318fSShuhei Matsumoto 	struct worker_thread *worker;
827ca1b318fSShuhei Matsumoto 
828ca1b318fSShuhei Matsumoto 	SPDK_ENV_FOREACH_CORE(i) {
829ca1b318fSShuhei Matsumoto 		worker = calloc(1, sizeof(*worker));
830ca1b318fSShuhei Matsumoto 		if (worker == NULL) {
831ca1b318fSShuhei Matsumoto 			fprintf(stderr, "Unable to allocate worker\n");
832ca1b318fSShuhei Matsumoto 			return -1;
833ca1b318fSShuhei Matsumoto 		}
834ca1b318fSShuhei Matsumoto 
8354c3fd228SShuhei Matsumoto 		TAILQ_INIT(&worker->ns_ctx);
8364c3fd228SShuhei Matsumoto 		TAILQ_INIT(&worker->ctrlr_ctx);
837ca1b318fSShuhei Matsumoto 		worker->lcore = i;
8384c3fd228SShuhei Matsumoto 		TAILQ_INSERT_TAIL(&g_workers, worker, link);
839ca1b318fSShuhei Matsumoto 		g_num_workers++;
840ca1b318fSShuhei Matsumoto 	}
841ca1b318fSShuhei Matsumoto 
842ca1b318fSShuhei Matsumoto 	return 0;
843ca1b318fSShuhei Matsumoto }
844ca1b318fSShuhei Matsumoto 
845ca1b318fSShuhei Matsumoto static void
846ca1b318fSShuhei Matsumoto unregister_workers(void)
847ca1b318fSShuhei Matsumoto {
8484c3fd228SShuhei Matsumoto 	struct worker_thread *worker, *tmp_worker;
8494c3fd228SShuhei Matsumoto 	struct ns_worker_ctx *ns_ctx, *tmp_ns_ctx;
8504c3fd228SShuhei Matsumoto 	struct ctrlr_worker_ctx *ctrlr_ctx, *tmp_ctrlr_ctx;
851ca1b318fSShuhei Matsumoto 
852ca1b318fSShuhei Matsumoto 	/* Free namespace context and worker thread */
8534c3fd228SShuhei Matsumoto 	TAILQ_FOREACH_SAFE(worker, &g_workers, link, tmp_worker) {
8544c3fd228SShuhei Matsumoto 		TAILQ_REMOVE(&g_workers, worker, link);
855ca1b318fSShuhei Matsumoto 
8564c3fd228SShuhei Matsumoto 		TAILQ_FOREACH_SAFE(ns_ctx, &worker->ns_ctx, link, tmp_ns_ctx) {
8574c3fd228SShuhei Matsumoto 			TAILQ_REMOVE(&worker->ns_ctx, ns_ctx, link);
858bb19c18fSNick Connolly 			printf("NS: %s I/O completed: %" PRIu64 ", failed: %" PRIu64 "\n",
859ca1b318fSShuhei Matsumoto 			       ns_ctx->entry->name, ns_ctx->io_completed, ns_ctx->io_failed);
860ca1b318fSShuhei Matsumoto 			free(ns_ctx);
861ca1b318fSShuhei Matsumoto 		}
862ca1b318fSShuhei Matsumoto 
8634c3fd228SShuhei Matsumoto 		TAILQ_FOREACH_SAFE(ctrlr_ctx, &worker->ctrlr_ctx, link, tmp_ctrlr_ctx) {
8644c3fd228SShuhei Matsumoto 			TAILQ_REMOVE(&worker->ctrlr_ctx, ctrlr_ctx, link);
865bb19c18fSNick Connolly 			printf("CTRLR: %s abort submitted %" PRIu64 ", failed to submit %" PRIu64 "\n",
866ca1b318fSShuhei Matsumoto 			       ctrlr_ctx->entry->name, ctrlr_ctx->abort_submitted,
867ca1b318fSShuhei Matsumoto 			       ctrlr_ctx->abort_submit_failed);
86834edd9f1SKamil Godzwon 			printf("\t success %" PRIu64 ", unsuccessful %" PRIu64 ", failed %" PRIu64 "\n",
869ca1b318fSShuhei Matsumoto 			       ctrlr_ctx->successful_abort, ctrlr_ctx->unsuccessful_abort,
870ca1b318fSShuhei Matsumoto 			       ctrlr_ctx->abort_failed);
871ca1b318fSShuhei Matsumoto 			free(ctrlr_ctx);
872ca1b318fSShuhei Matsumoto 		}
873ca1b318fSShuhei Matsumoto 
874ca1b318fSShuhei Matsumoto 		free(worker);
875ca1b318fSShuhei Matsumoto 	}
876ca1b318fSShuhei Matsumoto }
877ca1b318fSShuhei Matsumoto 
878ca1b318fSShuhei Matsumoto static bool
879ca1b318fSShuhei Matsumoto probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
880ca1b318fSShuhei Matsumoto 	 struct spdk_nvme_ctrlr_opts *opts)
881ca1b318fSShuhei Matsumoto {
882a595959dSJim Harris 	uint16_t min_aq_size;
883a595959dSJim Harris 
884a595959dSJim Harris 	/* We need to make sure the admin queue is big enough to handle all of the aborts that
885a595959dSJim Harris 	 * will be sent by this test app.  We add a few extra entries to account for any admin
886a595959dSJim Harris 	 * commands other than the aborts. */
887a595959dSJim Harris 	min_aq_size = spdk_divide_round_up(g_queue_depth, g_abort_interval) + 8;
888a595959dSJim Harris 	opts->admin_queue_size = spdk_max(opts->admin_queue_size, min_aq_size);
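	/* With the defaults (-q 128, -a 1), this asks for an admin queue of at
	 * least 128 / 1 + 8 = 136 entries.
	 */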
889a595959dSJim Harris 
8900c5d34adSwanghailiangx 	/* Avoid possible nvme_qpair_abort_queued_reqs_with_cbarg ERROR when IO queue size is 128. */
8910c5d34adSwanghailiangx 	opts->disable_error_logging = true;
8920c5d34adSwanghailiangx 
893ca1b318fSShuhei Matsumoto 	return true;
894ca1b318fSShuhei Matsumoto }
895ca1b318fSShuhei Matsumoto 
896ca1b318fSShuhei Matsumoto static void
897ca1b318fSShuhei Matsumoto attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
898ca1b318fSShuhei Matsumoto 	  struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
899ca1b318fSShuhei Matsumoto {
900ca1b318fSShuhei Matsumoto 	struct trid_entry       *trid_entry = cb_ctx;
901ca1b318fSShuhei Matsumoto 	struct spdk_pci_addr    pci_addr;
902ca1b318fSShuhei Matsumoto 	struct spdk_pci_device  *pci_dev;
903ca1b318fSShuhei Matsumoto 	struct spdk_pci_id      pci_id;
904ca1b318fSShuhei Matsumoto 
905ca1b318fSShuhei Matsumoto 	if (trid->trtype != SPDK_NVME_TRANSPORT_PCIE) {
906ca1b318fSShuhei Matsumoto 		printf("Attached to NVMe over Fabrics controller at %s:%s: %s\n",
907ca1b318fSShuhei Matsumoto 		       trid->traddr, trid->trsvcid,
908ca1b318fSShuhei Matsumoto 		       trid->subnqn);
909ca1b318fSShuhei Matsumoto 	} else {
910ca1b318fSShuhei Matsumoto 		if (spdk_pci_addr_parse(&pci_addr, trid->traddr)) {
911ca1b318fSShuhei Matsumoto 			return;
912ca1b318fSShuhei Matsumoto 		}
913ca1b318fSShuhei Matsumoto 
914ca1b318fSShuhei Matsumoto 		pci_dev = spdk_nvme_ctrlr_get_pci_device(ctrlr);
915ca1b318fSShuhei Matsumoto 		if (!pci_dev) {
916ca1b318fSShuhei Matsumoto 			return;
917ca1b318fSShuhei Matsumoto 		}
918ca1b318fSShuhei Matsumoto 
919ca1b318fSShuhei Matsumoto 		pci_id = spdk_pci_device_get_id(pci_dev);
920ca1b318fSShuhei Matsumoto 
921ca1b318fSShuhei Matsumoto 		printf("Attached to NVMe Controller at %s [%04x:%04x]\n",
922ca1b318fSShuhei Matsumoto 		       trid->traddr,
923ca1b318fSShuhei Matsumoto 		       pci_id.vendor_id, pci_id.device_id);
924ca1b318fSShuhei Matsumoto 	}
925ca1b318fSShuhei Matsumoto 
926ca1b318fSShuhei Matsumoto 	register_ctrlr(ctrlr, trid_entry);
927ca1b318fSShuhei Matsumoto }
928ca1b318fSShuhei Matsumoto 
929ca1b318fSShuhei Matsumoto static int
930ca1b318fSShuhei Matsumoto register_controllers(void)
931ca1b318fSShuhei Matsumoto {
932ca1b318fSShuhei Matsumoto 	struct trid_entry *trid_entry;
933ca1b318fSShuhei Matsumoto 
934ca1b318fSShuhei Matsumoto 	printf("Initializing NVMe Controllers\n");
935ca1b318fSShuhei Matsumoto 
936ca1b318fSShuhei Matsumoto 	TAILQ_FOREACH(trid_entry, &g_trid_list, tailq) {
937ca1b318fSShuhei Matsumoto 		if (spdk_nvme_probe(&trid_entry->trid, trid_entry, probe_cb, attach_cb, NULL) != 0) {
938ca1b318fSShuhei Matsumoto 			fprintf(stderr, "spdk_nvme_probe() failed for transport address '%s'\n",
939ca1b318fSShuhei Matsumoto 				trid_entry->trid.traddr);
940ca1b318fSShuhei Matsumoto 			return -1;
941ca1b318fSShuhei Matsumoto 		}
942ca1b318fSShuhei Matsumoto 	}
943ca1b318fSShuhei Matsumoto 
944ca1b318fSShuhei Matsumoto 	return 0;
945ca1b318fSShuhei Matsumoto }
946ca1b318fSShuhei Matsumoto 
947ca1b318fSShuhei Matsumoto static void
948ca1b318fSShuhei Matsumoto unregister_controllers(void)
949ca1b318fSShuhei Matsumoto {
9504c3fd228SShuhei Matsumoto 	struct ctrlr_entry *entry, *tmp;
9510a903c91SShuhei Matsumoto 	struct spdk_nvme_detach_ctx *detach_ctx = NULL;
952ca1b318fSShuhei Matsumoto 
9534c3fd228SShuhei Matsumoto 	TAILQ_FOREACH_SAFE(entry, &g_controllers, link, tmp) {
9544c3fd228SShuhei Matsumoto 		TAILQ_REMOVE(&g_controllers, entry, link);
9550a903c91SShuhei Matsumoto 		spdk_nvme_detach_async(entry->ctrlr, &detach_ctx);
956ca1b318fSShuhei Matsumoto 		free(entry);
957ca1b318fSShuhei Matsumoto 	}
9580a903c91SShuhei Matsumoto 
9594fe4040aSShuhei Matsumoto 	if (detach_ctx) {
9604fe4040aSShuhei Matsumoto 		spdk_nvme_detach_poll(detach_ctx);
9610a903c91SShuhei Matsumoto 	}
962ca1b318fSShuhei Matsumoto }
963ca1b318fSShuhei Matsumoto 
964ca1b318fSShuhei Matsumoto static int
96587b21afdSJim Harris associate_main_worker_with_ctrlr(void)
966ca1b318fSShuhei Matsumoto {
9674c3fd228SShuhei Matsumoto 	struct ctrlr_entry	*entry;
9684c3fd228SShuhei Matsumoto 	struct worker_thread	*worker;
969ca1b318fSShuhei Matsumoto 	struct ctrlr_worker_ctx	*ctrlr_ctx;
970ca1b318fSShuhei Matsumoto 
9714c3fd228SShuhei Matsumoto 	TAILQ_FOREACH(worker, &g_workers, link) {
97287b21afdSJim Harris 		if (worker->lcore == g_main_core) {
973ca1b318fSShuhei Matsumoto 			break;
974ca1b318fSShuhei Matsumoto 		}
975ca1b318fSShuhei Matsumoto 	}
976ca1b318fSShuhei Matsumoto 
977ca1b318fSShuhei Matsumoto 	if (!worker) {
978ca1b318fSShuhei Matsumoto 		return -1;
979ca1b318fSShuhei Matsumoto 	}
980ca1b318fSShuhei Matsumoto 
9814c3fd228SShuhei Matsumoto 	TAILQ_FOREACH(entry, &g_controllers, link) {
982ca1b318fSShuhei Matsumoto 		ctrlr_ctx = calloc(1, sizeof(struct ctrlr_worker_ctx));
983ca1b318fSShuhei Matsumoto 		if (!ctrlr_ctx) {
984ca1b318fSShuhei Matsumoto 			return -1;
985ca1b318fSShuhei Matsumoto 		}
986ca1b318fSShuhei Matsumoto 
987ca1b318fSShuhei Matsumoto 		pthread_mutex_init(&ctrlr_ctx->mutex, NULL);
988ca1b318fSShuhei Matsumoto 		ctrlr_ctx->entry = entry;
989ca1b318fSShuhei Matsumoto 		ctrlr_ctx->ctrlr = entry->ctrlr;
990ca1b318fSShuhei Matsumoto 
9914c3fd228SShuhei Matsumoto 		TAILQ_INSERT_TAIL(&worker->ctrlr_ctx, ctrlr_ctx, link);
992ca1b318fSShuhei Matsumoto 	}
993ca1b318fSShuhei Matsumoto 
994ca1b318fSShuhei Matsumoto 	return 0;
995ca1b318fSShuhei Matsumoto }
996ca1b318fSShuhei Matsumoto 
997ca1b318fSShuhei Matsumoto static struct ctrlr_worker_ctx *
998ca1b318fSShuhei Matsumoto get_ctrlr_worker_ctx(struct spdk_nvme_ctrlr *ctrlr)
999ca1b318fSShuhei Matsumoto {
10004c3fd228SShuhei Matsumoto 	struct worker_thread	*worker;
1001ca1b318fSShuhei Matsumoto 	struct ctrlr_worker_ctx *ctrlr_ctx;
1002ca1b318fSShuhei Matsumoto 
10034c3fd228SShuhei Matsumoto 	TAILQ_FOREACH(worker, &g_workers, link) {
100487b21afdSJim Harris 		if (worker->lcore == g_main_core) {
1005ca1b318fSShuhei Matsumoto 			break;
1006ca1b318fSShuhei Matsumoto 		}
1007ca1b318fSShuhei Matsumoto 	}
1008ca1b318fSShuhei Matsumoto 
1009ca1b318fSShuhei Matsumoto 	if (!worker) {
1010ca1b318fSShuhei Matsumoto 		return NULL;
1011ca1b318fSShuhei Matsumoto 	}
1012ca1b318fSShuhei Matsumoto 
10134c3fd228SShuhei Matsumoto 	TAILQ_FOREACH(ctrlr_ctx, &worker->ctrlr_ctx, link) {
1014ca1b318fSShuhei Matsumoto 		if (ctrlr_ctx->ctrlr == ctrlr) {
1015ca1b318fSShuhei Matsumoto 			return ctrlr_ctx;
1016ca1b318fSShuhei Matsumoto 		}
1017ca1b318fSShuhei Matsumoto 	}
1018ca1b318fSShuhei Matsumoto 
1019ca1b318fSShuhei Matsumoto 	return NULL;
1020ca1b318fSShuhei Matsumoto }
1021ca1b318fSShuhei Matsumoto 
1022ca1b318fSShuhei Matsumoto static int
1023ca1b318fSShuhei Matsumoto associate_workers_with_ns(void)
1024ca1b318fSShuhei Matsumoto {
10254c3fd228SShuhei Matsumoto 	struct ns_entry		*entry = TAILQ_FIRST(&g_namespaces);
10264c3fd228SShuhei Matsumoto 	struct worker_thread	*worker = TAILQ_FIRST(&g_workers);
1027ca1b318fSShuhei Matsumoto 	struct ns_worker_ctx	*ns_ctx;
1028ca1b318fSShuhei Matsumoto 	int			i, count;
1029ca1b318fSShuhei Matsumoto 
1030ca1b318fSShuhei Matsumoto 	count = g_num_namespaces > g_num_workers ? g_num_namespaces : g_num_workers;
1031ca1b318fSShuhei Matsumoto 
1032ca1b318fSShuhei Matsumoto 	for (i = 0; i < count; i++) {
1033ca1b318fSShuhei Matsumoto 		if (entry == NULL) {
1034ca1b318fSShuhei Matsumoto 			break;
1035ca1b318fSShuhei Matsumoto 		}
1036ca1b318fSShuhei Matsumoto 
1037ca1b318fSShuhei Matsumoto 		ns_ctx = calloc(1, sizeof(struct ns_worker_ctx));
1038ca1b318fSShuhei Matsumoto 		if (!ns_ctx) {
1039ca1b318fSShuhei Matsumoto 			return -1;
1040ca1b318fSShuhei Matsumoto 		}
1041ca1b318fSShuhei Matsumoto 
1042ca1b318fSShuhei Matsumoto 		printf("Associating %s with lcore %d\n", entry->name, worker->lcore);
1043ca1b318fSShuhei Matsumoto 		ns_ctx->entry = entry;
1044ca1b318fSShuhei Matsumoto 		ns_ctx->ctrlr_ctx = get_ctrlr_worker_ctx(entry->ctrlr);
1045ca1b318fSShuhei Matsumoto 		if (!ns_ctx->ctrlr_ctx) {
1046ca1b318fSShuhei Matsumoto 			free(ns_ctx);
1047ca1b318fSShuhei Matsumoto 			return -1;
1048ca1b318fSShuhei Matsumoto 		}
1049ca1b318fSShuhei Matsumoto 
10504c3fd228SShuhei Matsumoto 		TAILQ_INSERT_TAIL(&worker->ns_ctx, ns_ctx, link);
1051ca1b318fSShuhei Matsumoto 
10524c3fd228SShuhei Matsumoto 		worker = TAILQ_NEXT(worker, link);
1053ca1b318fSShuhei Matsumoto 		if (worker == NULL) {
10544c3fd228SShuhei Matsumoto 			worker = TAILQ_FIRST(&g_workers);
1055ca1b318fSShuhei Matsumoto 		}
1056ca1b318fSShuhei Matsumoto 
10574c3fd228SShuhei Matsumoto 		entry = TAILQ_NEXT(entry, link);
1058ca1b318fSShuhei Matsumoto 		if (entry == NULL) {
10594c3fd228SShuhei Matsumoto 			entry = TAILQ_FIRST(&g_namespaces);
1060ca1b318fSShuhei Matsumoto 		}
1061ca1b318fSShuhei Matsumoto 	}
1062ca1b318fSShuhei Matsumoto 
1063ca1b318fSShuhei Matsumoto 	return 0;
1064ca1b318fSShuhei Matsumoto }
1065ca1b318fSShuhei Matsumoto 
10668dd1cd21SBen Walker int
10678dd1cd21SBen Walker main(int argc, char **argv)
1068ca1b318fSShuhei Matsumoto {
1069ca1b318fSShuhei Matsumoto 	int rc;
107087b21afdSJim Harris 	struct worker_thread *worker, *main_worker;
1071ca1b318fSShuhei Matsumoto 	struct spdk_env_opts opts;
1072ca1b318fSShuhei Matsumoto 
1073ca1b318fSShuhei Matsumoto 	rc = parse_args(argc, argv);
1074ca1b318fSShuhei Matsumoto 	if (rc != 0) {
1075ca1b318fSShuhei Matsumoto 		return rc;
1076ca1b318fSShuhei Matsumoto 	}
1077ca1b318fSShuhei Matsumoto 
1078*57fd99b9SJim Harris 	opts.opts_size = sizeof(opts);
1079ca1b318fSShuhei Matsumoto 	spdk_env_opts_init(&opts);
1080ca1b318fSShuhei Matsumoto 	opts.name = "abort";
1081ca1b318fSShuhei Matsumoto 	opts.shm_id = g_shm_id;
1082ca1b318fSShuhei Matsumoto 	if (g_core_mask) {
1083ca1b318fSShuhei Matsumoto 		opts.core_mask = g_core_mask;
1084ca1b318fSShuhei Matsumoto 	}
1085ca1b318fSShuhei Matsumoto 
1086ca1b318fSShuhei Matsumoto 	if (g_dpdk_mem) {
1087ca1b318fSShuhei Matsumoto 		opts.mem_size = g_dpdk_mem;
1088ca1b318fSShuhei Matsumoto 	}
1089ca1b318fSShuhei Matsumoto 	if (g_no_pci) {
1090ca1b318fSShuhei Matsumoto 		opts.no_pci = g_no_pci;
1091ca1b318fSShuhei Matsumoto 	}
10921826245aSSarvesh Lanke 	if (g_no_hugepages) {
10931826245aSSarvesh Lanke 		opts.no_huge = true;
10941826245aSSarvesh Lanke 	}
1095ca1b318fSShuhei Matsumoto 	if (spdk_env_init(&opts) < 0) {
1096ca1b318fSShuhei Matsumoto 		fprintf(stderr, "Unable to initialize SPDK env\n");
10979ec9c8b3SChangpeng Liu 		unregister_trids();
10989ec9c8b3SChangpeng Liu 		return -1;
1099ca1b318fSShuhei Matsumoto 	}
1100ca1b318fSShuhei Matsumoto 
1101ca1b318fSShuhei Matsumoto 	g_tsc_rate = spdk_get_ticks_hz();
1102ca1b318fSShuhei Matsumoto 
1103ca1b318fSShuhei Matsumoto 	if (register_workers() != 0) {
1104ca1b318fSShuhei Matsumoto 		rc = -1;
1105ca1b318fSShuhei Matsumoto 		goto cleanup;
1106ca1b318fSShuhei Matsumoto 	}
1107ca1b318fSShuhei Matsumoto 
1108ca1b318fSShuhei Matsumoto 	if (register_controllers() != 0) {
1109ca1b318fSShuhei Matsumoto 		rc = -1;
1110ca1b318fSShuhei Matsumoto 		goto cleanup;
1111ca1b318fSShuhei Matsumoto 	}
1112ca1b318fSShuhei Matsumoto 
1113ca1b318fSShuhei Matsumoto 	if (g_warn) {
1114ca1b318fSShuhei Matsumoto 		printf("WARNING: Some requested NVMe devices were skipped\n");
1115ca1b318fSShuhei Matsumoto 	}
1116ca1b318fSShuhei Matsumoto 
1117ca1b318fSShuhei Matsumoto 	if (g_num_namespaces == 0) {
1118ca1b318fSShuhei Matsumoto 		fprintf(stderr, "No valid NVMe controllers found\n");
11192e4466d0SKonrad Sztyber 		rc = -1;
1120ca1b318fSShuhei Matsumoto 		goto cleanup;
1121ca1b318fSShuhei Matsumoto 	}
1122ca1b318fSShuhei Matsumoto 
112387b21afdSJim Harris 	if (associate_main_worker_with_ctrlr() != 0) {
1124ca1b318fSShuhei Matsumoto 		rc = -1;
1125ca1b318fSShuhei Matsumoto 		goto cleanup;
1126ca1b318fSShuhei Matsumoto 	}
1127ca1b318fSShuhei Matsumoto 
1128ca1b318fSShuhei Matsumoto 	if (associate_workers_with_ns() != 0) {
1129ca1b318fSShuhei Matsumoto 		rc = -1;
1130ca1b318fSShuhei Matsumoto 		goto cleanup;
1131ca1b318fSShuhei Matsumoto 	}
1132ca1b318fSShuhei Matsumoto 
1133ca1b318fSShuhei Matsumoto 	printf("Initialization complete. Launching workers.\n");
1134ca1b318fSShuhei Matsumoto 
113587b21afdSJim Harris 	/* Launch all of the secondary workers */
113687b21afdSJim Harris 	g_main_core = spdk_env_get_current_core();
113787b21afdSJim Harris 	main_worker = NULL;
11384c3fd228SShuhei Matsumoto 	TAILQ_FOREACH(worker, &g_workers, link) {
113987b21afdSJim Harris 		if (worker->lcore != g_main_core) {
1140ca1b318fSShuhei Matsumoto 			spdk_env_thread_launch_pinned(worker->lcore, work_fn, worker);
1141ca1b318fSShuhei Matsumoto 		} else {
114287b21afdSJim Harris 			assert(main_worker == NULL);
114387b21afdSJim Harris 			main_worker = worker;
1144ca1b318fSShuhei Matsumoto 		}
1145ca1b318fSShuhei Matsumoto 	}
1146ca1b318fSShuhei Matsumoto 
114787b21afdSJim Harris 	assert(main_worker != NULL);
114887b21afdSJim Harris 	rc = work_fn(main_worker);
1149ca1b318fSShuhei Matsumoto 
1150ca1b318fSShuhei Matsumoto 	spdk_env_thread_wait_all();
1151ca1b318fSShuhei Matsumoto 
11529c04e6d8SKonrad Sztyber 	TAILQ_FOREACH(worker, &g_workers, link) {
11539c04e6d8SKonrad Sztyber 		if (worker->status != 0) {
11549c04e6d8SKonrad Sztyber 			rc = 1;
11559c04e6d8SKonrad Sztyber 			break;
11569c04e6d8SKonrad Sztyber 		}
11579c04e6d8SKonrad Sztyber 	}
11589c04e6d8SKonrad Sztyber 
1159ca1b318fSShuhei Matsumoto cleanup:
1160ca1b318fSShuhei Matsumoto 	unregister_trids();
1161ca1b318fSShuhei Matsumoto 	unregister_workers();
1162ca1b318fSShuhei Matsumoto 	unregister_namespaces();
1163ca1b318fSShuhei Matsumoto 	unregister_controllers();
1164ca1b318fSShuhei Matsumoto 
11659ec9c8b3SChangpeng Liu 	spdk_env_fini();
11669ec9c8b3SChangpeng Liu 
1167ca1b318fSShuhei Matsumoto 	if (rc != 0) {
1168b7876f9aSJosh Soref 		fprintf(stderr, "%s: errors occurred\n", argv[0]);
1169ca1b318fSShuhei Matsumoto 	}
1170ca1b318fSShuhei Matsumoto 
1171ca1b318fSShuhei Matsumoto 	return rc;
1172ca1b318fSShuhei Matsumoto }
1173