xref: /spdk/lib/nvme/nvme_ctrlr.c (revision 5977aad8f7486552c94c5cc93ea9bb110e1cb5d0)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "nvme_internal.h"
37 
38 #include "spdk/env.h"
39 #include "spdk/string.h"
40 
41 static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
42 		struct nvme_async_event_request *aer);
43 static int nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns);
44 static int nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns);
45 
46 static int
47 nvme_ctrlr_get_cc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc)
48 {
49 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
50 					      &cc->raw);
51 }
52 
53 static int
54 nvme_ctrlr_get_csts(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts)
55 {
56 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts.raw),
57 					      &csts->raw);
58 }
59 
60 int
61 nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
62 {
63 	return nvme_transport_ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
64 					      &cap->raw);
65 }
66 
67 int
68 nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs)
69 {
70 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, vs.raw),
71 					      &vs->raw);
72 }
73 
74 static int
75 nvme_ctrlr_set_cc(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cc_register *cc)
76 {
77 	return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
78 					      cc->raw);
79 }
80 
81 void
82 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
83 {
84 	char host_id_str[SPDK_UUID_STRING_LEN];
85 
86 	assert(opts);
87 
88 	memset(opts, 0, opts_size);
89 
90 #define FIELD_OK(field) \
91 	offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size
92 
93 	if (FIELD_OK(num_io_queues)) {
94 		opts->num_io_queues = DEFAULT_MAX_IO_QUEUES;
95 	}
96 
97 	if (FIELD_OK(use_cmb_sqs)) {
98 		opts->use_cmb_sqs = true;
99 	}
100 
101 	if (FIELD_OK(arb_mechanism)) {
102 		opts->arb_mechanism = SPDK_NVME_CC_AMS_RR;
103 	}
104 
105 	if (FIELD_OK(keep_alive_timeout_ms)) {
106 		opts->keep_alive_timeout_ms = 10 * 1000;
107 	}
108 
109 	if (FIELD_OK(io_queue_size)) {
110 		opts->io_queue_size = DEFAULT_IO_QUEUE_SIZE;
111 	}
112 
113 	if (FIELD_OK(io_queue_requests)) {
114 		opts->io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
115 	}
116 
117 	if (FIELD_OK(host_id)) {
118 		memset(opts->host_id, 0, sizeof(opts->host_id));
119 	}
120 
121 	if (nvme_driver_init() == 0) {
122 		if (FIELD_OK(extended_host_id)) {
123 			memcpy(opts->extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
124 			       sizeof(opts->extended_host_id));
125 		}
126 
127 		if (FIELD_OK(hostnqn)) {
128 			spdk_uuid_fmt_lower(host_id_str, sizeof(host_id_str),
129 					    &g_spdk_nvme_driver->default_extended_host_id);
130 			snprintf(opts->hostnqn, sizeof(opts->hostnqn), "2014-08.org.nvmexpress:uuid:%s", host_id_str);
131 		}
132 	}
133 
134 	if (FIELD_OK(src_addr)) {
135 		memset(opts->src_addr, 0, sizeof(opts->src_addr));
136 	}
137 
138 	if (FIELD_OK(src_svcid)) {
139 		memset(opts->src_svcid, 0, sizeof(opts->src_svcid));
140 	}
141 
142 	if (FIELD_OK(command_set)) {
143 		opts->command_set = SPDK_NVME_CC_CSS_NVM;
144 	}
145 
146 	if (FIELD_OK(admin_timeout_ms)) {
147 		opts->admin_timeout_ms = NVME_MAX_TIMEOUT_PERIOD * 1000;
148 	}
149 
150 	if (FIELD_OK(header_digest)) {
151 		opts->header_digest = false;
152 	}
153 
154 	if (FIELD_OK(data_digest)) {
155 		opts->data_digest = false;
156 	}
157 #undef FIELD_OK
158 }
159 
160 /**
161  * This function will be called when the process allocates the IO qpair.
162  * Note: the ctrlr_lock must be held when calling this function.
163  */
164 static void
165 nvme_ctrlr_proc_add_io_qpair(struct spdk_nvme_qpair *qpair)
166 {
167 	struct spdk_nvme_ctrlr_process	*active_proc;
168 	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
169 
170 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
171 	if (active_proc) {
172 		TAILQ_INSERT_TAIL(&active_proc->allocated_io_qpairs, qpair, per_process_tailq);
173 		qpair->active_proc = active_proc;
174 	}
175 }
176 
177 /**
178  * This function will be called when the process frees the IO qpair.
179  * Note: the ctrlr_lock must be held when calling this function.
180  */
181 static void
182 nvme_ctrlr_proc_remove_io_qpair(struct spdk_nvme_qpair *qpair)
183 {
184 	struct spdk_nvme_ctrlr_process	*active_proc;
185 	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
186 	struct spdk_nvme_qpair          *active_qpair, *tmp_qpair;
187 
188 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
189 	if (!active_proc) {
190 		return;
191 	}
192 
193 	TAILQ_FOREACH_SAFE(active_qpair, &active_proc->allocated_io_qpairs,
194 			   per_process_tailq, tmp_qpair) {
195 		if (active_qpair == qpair) {
196 			TAILQ_REMOVE(&active_proc->allocated_io_qpairs,
197 				     active_qpair, per_process_tailq);
198 
199 			break;
200 		}
201 	}
202 }
203 
204 void
205 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
206 		struct spdk_nvme_io_qpair_opts *opts,
207 		size_t opts_size)
208 {
209 	assert(ctrlr);
210 
211 	assert(opts);
212 
213 	memset(opts, 0, opts_size);
214 
215 #define FIELD_OK(field) \
216 	offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(opts->field) <= opts_size
217 
218 	if (FIELD_OK(qprio)) {
219 		opts->qprio = SPDK_NVME_QPRIO_URGENT;
220 	}
221 
222 	if (FIELD_OK(io_queue_size)) {
223 		opts->io_queue_size = ctrlr->opts.io_queue_size;
224 	}
225 
226 	if (FIELD_OK(io_queue_requests)) {
227 		opts->io_queue_requests = ctrlr->opts.io_queue_requests;
228 	}
229 
230 #undef FIELD_OK
231 }
232 
233 struct spdk_nvme_qpair *
234 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
235 			       const struct spdk_nvme_io_qpair_opts *user_opts,
236 			       size_t opts_size)
237 {
238 	uint32_t				qid;
239 	struct spdk_nvme_qpair			*qpair;
240 	union spdk_nvme_cc_register		cc;
241 	struct spdk_nvme_io_qpair_opts		opts;
242 
243 	if (!ctrlr) {
244 		return NULL;
245 	}
246 
247 	/*
248 	 * Get the default options, then overwrite them with the user-provided options
249 	 * up to opts_size.
250 	 *
251 	 * This allows for extensions of the opts structure without breaking
252 	 * ABI compatibility.
253 	 */
254 	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
255 	if (user_opts) {
256 		memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size));
257 	}
258 
259 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
260 	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
261 		SPDK_ERRLOG("get_cc failed\n");
262 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
263 		return NULL;
264 	}
265 
266 	/* Only the low 2 bits (values 0, 1, 2, 3) of QPRIO are valid. */
267 	if ((opts.qprio & 3) != opts.qprio) {
268 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
269 		return NULL;
270 	}
271 
272 	/*
273 	 * Only value SPDK_NVME_QPRIO_URGENT(0) is valid for the
274 	 * default round robin arbitration method.
275 	 */
276 	if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts.qprio != SPDK_NVME_QPRIO_URGENT)) {
277 		SPDK_ERRLOG("invalid queue priority for default round robin arbitration method\n");
278 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
279 		return NULL;
280 	}
281 
282 	/*
283 	 * Get the first available I/O queue ID.
284 	 */
285 	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
286 	if (qid > ctrlr->opts.num_io_queues) {
287 		SPDK_ERRLOG("No free I/O queue IDs\n");
288 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
289 		return NULL;
290 	}
291 
292 	qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, &opts);
293 	if (qpair == NULL) {
294 		SPDK_ERRLOG("nvme_transport_ctrlr_create_io_qpair() failed\n");
295 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
296 		return NULL;
297 	}
298 	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
299 	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
300 
301 	nvme_ctrlr_proc_add_io_qpair(qpair);
302 
303 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
304 
305 	if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC) {
306 		spdk_delay_us(100);
307 	}
308 
309 	return qpair;
310 }
311 
312 int
313 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
314 {
315 	struct spdk_nvme_ctrlr *ctrlr;
316 
317 	if (qpair == NULL) {
318 		return 0;
319 	}
320 
321 	ctrlr = qpair->ctrlr;
322 
323 	if (qpair->in_completion_context) {
324 		/*
325 		 * There are many cases where it is convenient to delete an io qpair in the context
326 		 *  of that qpair's completion routine.  To handle this properly, set a flag here
327 		 *  so that the completion routine will perform an actual delete after the context
328 		 *  unwinds.
329 		 */
330 		qpair->delete_after_completion_context = 1;
331 		return 0;
332 	}
333 
334 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
335 
336 	nvme_ctrlr_proc_remove_io_qpair(qpair);
337 
338 	TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
339 	spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);
340 
341 	if (nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair)) {
342 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
343 		return -1;
344 	}
345 
346 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
347 	return 0;
348 }
349 
350 static void
351 nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
352 		struct spdk_nvme_intel_log_page_directory *log_page_directory)
353 {
354 	if (log_page_directory == NULL) {
355 		return;
356 	}
357 
358 	if (ctrlr->cdata.vid != SPDK_PCI_VID_INTEL) {
359 		return;
360 	}
361 
362 	ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY] = true;
363 
364 	if (log_page_directory->read_latency_log_len ||
365 	    (ctrlr->quirks & NVME_INTEL_QUIRK_READ_LATENCY)) {
366 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] = true;
367 	}
368 	if (log_page_directory->write_latency_log_len ||
369 	    (ctrlr->quirks & NVME_INTEL_QUIRK_WRITE_LATENCY)) {
370 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] = true;
371 	}
372 	if (log_page_directory->temperature_statistics_log_len) {
373 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] = true;
374 	}
375 	if (log_page_directory->smart_log_len) {
376 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_SMART] = true;
377 	}
378 	if (log_page_directory->marketing_description_log_len) {
379 		ctrlr->log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] = true;
380 	}
381 }
382 
383 static int nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
384 {
385 	int rc = 0;
386 	uint64_t phys_addr = 0;
387 	struct nvme_completion_poll_status	status;
388 	struct spdk_nvme_intel_log_page_directory *log_page_directory;
389 
390 	log_page_directory = spdk_zmalloc(sizeof(struct spdk_nvme_intel_log_page_directory),
391 					  64, &phys_addr, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
392 	if (log_page_directory == NULL) {
393 		SPDK_ERRLOG("could not allocate log_page_directory\n");
394 		return -ENXIO;
395 	}
396 
397 	rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
398 					      SPDK_NVME_GLOBAL_NS_TAG, log_page_directory,
399 					      sizeof(struct spdk_nvme_intel_log_page_directory),
400 					      0, nvme_completion_poll_cb, &status);
401 	if (rc != 0) {
402 		spdk_free(log_page_directory);
403 		return rc;
404 	}
405 
406 	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
407 		spdk_free(log_page_directory);
408 		SPDK_WARNLOG("Intel log pages not supported on Intel drive!\n");
409 		return 0;
410 	}
411 
412 	nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, log_page_directory);
413 	spdk_free(log_page_directory);
414 	return 0;
415 }
416 
417 static int
418 nvme_ctrlr_set_supported_log_pages(struct spdk_nvme_ctrlr *ctrlr)
419 {
420 	int	rc = 0;
421 
422 	memset(ctrlr->log_page_supported, 0, sizeof(ctrlr->log_page_supported));
423 	/* Mandatory pages */
424 	ctrlr->log_page_supported[SPDK_NVME_LOG_ERROR] = true;
425 	ctrlr->log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] = true;
426 	ctrlr->log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] = true;
427 	if (ctrlr->cdata.lpa.celp) {
428 		ctrlr->log_page_supported[SPDK_NVME_LOG_COMMAND_EFFECTS_LOG] = true;
429 	}
430 	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL && !(ctrlr->quirks & NVME_INTEL_QUIRK_NO_LOG_PAGES)) {
431 		rc = nvme_ctrlr_set_intel_support_log_pages(ctrlr);
432 	}
433 
434 	return rc;
435 }
436 
437 static void
438 nvme_ctrlr_set_intel_supported_features(struct spdk_nvme_ctrlr *ctrlr)
439 {
440 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_MAX_LBA] = true;
441 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_NATIVE_MAX_LBA] = true;
442 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_POWER_GOVERNOR_SETTING] = true;
443 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_SMBUS_ADDRESS] = true;
444 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LED_PATTERN] = true;
445 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_RESET_TIMED_WORKLOAD_COUNTERS] = true;
446 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING] = true;
447 }
448 
449 static void
450 nvme_ctrlr_set_supported_features(struct spdk_nvme_ctrlr *ctrlr)
451 {
452 	memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported));
453 	/* Mandatory features */
454 	ctrlr->feature_supported[SPDK_NVME_FEAT_ARBITRATION] = true;
455 	ctrlr->feature_supported[SPDK_NVME_FEAT_POWER_MANAGEMENT] = true;
456 	ctrlr->feature_supported[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD] = true;
457 	ctrlr->feature_supported[SPDK_NVME_FEAT_ERROR_RECOVERY] = true;
458 	ctrlr->feature_supported[SPDK_NVME_FEAT_NUMBER_OF_QUEUES] = true;
459 	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_COALESCING] = true;
460 	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION] = true;
461 	ctrlr->feature_supported[SPDK_NVME_FEAT_WRITE_ATOMICITY] = true;
462 	ctrlr->feature_supported[SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION] = true;
463 	/* Optional features */
464 	if (ctrlr->cdata.vwc.present) {
465 		ctrlr->feature_supported[SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE] = true;
466 	}
467 	if (ctrlr->cdata.apsta.supported) {
468 		ctrlr->feature_supported[SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION] = true;
469 	}
470 	if (ctrlr->cdata.hmpre) {
471 		ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_MEM_BUFFER] = true;
472 	}
473 	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) {
474 		nvme_ctrlr_set_intel_supported_features(ctrlr);
475 	}
476 }
477 
478 void
479 nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
480 {
481 	/*
482 	 * Set the flag here and leave the work failure of qpairs to
483 	 * spdk_nvme_qpair_process_completions().
484 	 */
485 	if (hot_remove) {
486 		ctrlr->is_removed = true;
487 	}
488 	ctrlr->is_failed = true;
489 	SPDK_ERRLOG("ctrlr %s in failed state.\n", ctrlr->trid.traddr);
490 }
491 
492 static void
493 nvme_ctrlr_shutdown(struct spdk_nvme_ctrlr *ctrlr)
494 {
495 	union spdk_nvme_cc_register	cc;
496 	union spdk_nvme_csts_register	csts;
497 	uint32_t			ms_waited = 0;
498 	uint32_t			shutdown_timeout_ms;
499 
500 	if (ctrlr->is_removed) {
501 		return;
502 	}
503 
504 	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
505 		SPDK_ERRLOG("get_cc() failed\n");
506 		return;
507 	}
508 
509 	cc.bits.shn = SPDK_NVME_SHN_NORMAL;
510 
511 	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
512 		SPDK_ERRLOG("set_cc() failed\n");
513 		return;
514 	}
515 
516 	/*
517 	 * The NVMe specification defines RTD3E to be the time between
518 	 *  setting SHN = 1 until the controller will set SHST = 10b.
519 	 * If the device doesn't report RTD3 entry latency, or if it
520 	 *  reports RTD3 entry latency less than 10 seconds, pick
521 	 *  10 seconds as a reasonable amount of time to
522 	 *  wait before proceeding.
523 	 */
524 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "RTD3E = %" PRIu32 " us\n", ctrlr->cdata.rtd3e);
525 	shutdown_timeout_ms = (ctrlr->cdata.rtd3e + 999) / 1000;
526 	shutdown_timeout_ms = spdk_max(shutdown_timeout_ms, 10000);
527 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "shutdown timeout = %" PRIu32 " ms\n", shutdown_timeout_ms);
528 
529 	do {
530 		if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
531 			SPDK_ERRLOG("get_csts() failed\n");
532 			return;
533 		}
534 
535 		if (csts.bits.shst == SPDK_NVME_SHST_COMPLETE) {
536 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "shutdown complete in %u milliseconds\n",
537 				      ms_waited);
538 			return;
539 		}
540 
541 		nvme_delay(1000);
542 		ms_waited++;
543 	} while (ms_waited < shutdown_timeout_ms);
544 
545 	SPDK_ERRLOG("did not shutdown within %u milliseconds\n", shutdown_timeout_ms);
546 }
547 
548 static int
549 nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
550 {
551 	union spdk_nvme_cc_register	cc;
552 	int				rc;
553 
554 	rc = nvme_transport_ctrlr_enable(ctrlr);
555 	if (rc != 0) {
556 		SPDK_ERRLOG("transport ctrlr_enable failed\n");
557 		return rc;
558 	}
559 
560 	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
561 		SPDK_ERRLOG("get_cc() failed\n");
562 		return -EIO;
563 	}
564 
565 	if (cc.bits.en != 0) {
566 		SPDK_ERRLOG("%s called with CC.EN = 1\n", __func__);
567 		return -EINVAL;
568 	}
569 
570 	cc.bits.en = 1;
571 	cc.bits.css = 0;
572 	cc.bits.shn = 0;
573 	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
574 	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
575 
576 	/* Page size is 2 ^ (12 + mps). */
577 	cc.bits.mps = spdk_u32log2(ctrlr->page_size) - 12;
578 
579 	if (ctrlr->cap.bits.css == 0) {
580 		SPDK_INFOLOG(SPDK_LOG_NVME,
581 			     "Drive reports no command sets supported. Assuming NVM is supported.\n");
582 		ctrlr->cap.bits.css = SPDK_NVME_CAP_CSS_NVM;
583 	}
584 
585 	if (!(ctrlr->cap.bits.css & (1u << ctrlr->opts.command_set))) {
586 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Requested I/O command set %u but supported mask is 0x%x\n",
587 			      ctrlr->opts.command_set, ctrlr->cap.bits.css);
588 		return -EINVAL;
589 	}
590 
591 	cc.bits.css = ctrlr->opts.command_set;
592 
593 	switch (ctrlr->opts.arb_mechanism) {
594 	case SPDK_NVME_CC_AMS_RR:
595 		break;
596 	case SPDK_NVME_CC_AMS_WRR:
597 		if (SPDK_NVME_CAP_AMS_WRR & ctrlr->cap.bits.ams) {
598 			break;
599 		}
600 		return -EINVAL;
601 	case SPDK_NVME_CC_AMS_VS:
602 		if (SPDK_NVME_CAP_AMS_VS & ctrlr->cap.bits.ams) {
603 			break;
604 		}
605 		return -EINVAL;
606 	default:
607 		return -EINVAL;
608 	}
609 
610 	cc.bits.ams = ctrlr->opts.arb_mechanism;
611 
612 	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
613 		SPDK_ERRLOG("set_cc() failed\n");
614 		return -EIO;
615 	}
616 
617 	return 0;
618 }
619 
620 #ifdef DEBUG
621 static const char *
622 nvme_ctrlr_state_string(enum nvme_ctrlr_state state)
623 {
624 	switch (state) {
625 	case NVME_CTRLR_STATE_INIT_DELAY:
626 		return "delay init";
627 	case NVME_CTRLR_STATE_INIT:
628 		return "init";
629 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
630 		return "disable and wait for CSTS.RDY = 1";
631 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
632 		return "disable and wait for CSTS.RDY = 0";
633 	case NVME_CTRLR_STATE_ENABLE:
634 		return "enable controller by writing CC.EN = 1";
635 	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
636 		return "wait for CSTS.RDY = 1";
637 	case NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE:
638 		return "enable admin queue";
639 	case NVME_CTRLR_STATE_IDENTIFY:
640 		return "identify controller";
641 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
642 		return "wait for identify controller";
643 	case NVME_CTRLR_STATE_SET_NUM_QUEUES:
644 		return "set number of queues";
645 	case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
646 		return "wait for set number of queues";
647 	case NVME_CTRLR_STATE_GET_NUM_QUEUES:
648 		return "get number of queues";
649 	case NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES:
650 		return "wait for get number of queues";
651 	case NVME_CTRLR_STATE_CONSTRUCT_NS:
652 		return "construct namespaces";
653 	case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
654 		return "identify active ns";
655 	case NVME_CTRLR_STATE_IDENTIFY_NS:
656 		return "identify ns";
657 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
658 		return "wait for identify ns";
659 	case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
660 		return "identify namespace id descriptors";
661 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
662 		return "wait for identify namespace id descriptors";
663 	case NVME_CTRLR_STATE_CONFIGURE_AER:
664 		return "configure AER";
665 	case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
666 		return "wait for configure aer";
667 	case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
668 		return "set supported log pages";
669 	case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
670 		return "set supported features";
671 	case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
672 		return "set doorbell buffer config";
673 	case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
674 		return "wait for doorbell buffer config";
675 	case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
676 		return "set keep alive timeout";
677 	case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
678 		return "wait for set keep alive timeout";
679 	case NVME_CTRLR_STATE_SET_HOST_ID:
680 		return "set host ID";
681 	case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
682 		return "wait for set host ID";
683 	case NVME_CTRLR_STATE_READY:
684 		return "ready";
685 	case NVME_CTRLR_STATE_ERROR:
686 		return "error";
687 	}
688 	return "unknown";
689 };
690 #endif /* DEBUG */
691 
692 static void
693 nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
694 		     uint64_t timeout_in_ms)
695 {
696 	ctrlr->state = state;
697 	if (timeout_in_ms == 0) {
698 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "setting state to %s (no timeout)\n",
699 			      nvme_ctrlr_state_string(ctrlr->state));
700 		ctrlr->state_timeout_tsc = NVME_TIMEOUT_INFINITE;
701 	} else {
702 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "setting state to %s (timeout %" PRIu64 " ms)\n",
703 			      nvme_ctrlr_state_string(ctrlr->state), timeout_in_ms);
704 		ctrlr->state_timeout_tsc = spdk_get_ticks() + (timeout_in_ms * spdk_get_ticks_hz()) / 1000;
705 	}
706 }
707 
708 static void
709 nvme_ctrlr_free_doorbell_buffer(struct spdk_nvme_ctrlr *ctrlr)
710 {
711 	if (ctrlr->shadow_doorbell) {
712 		spdk_dma_free(ctrlr->shadow_doorbell);
713 		ctrlr->shadow_doorbell = NULL;
714 	}
715 
716 	if (ctrlr->eventidx) {
717 		spdk_dma_free(ctrlr->eventidx);
718 		ctrlr->eventidx = NULL;
719 	}
720 }
721 
722 static void
723 nvme_ctrlr_set_doorbell_buffer_config_done(void *arg, const struct spdk_nvme_cpl *cpl)
724 {
725 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
726 
727 	if (spdk_nvme_cpl_is_error(cpl)) {
728 		SPDK_WARNLOG("Doorbell buffer config failed\n");
729 	} else {
730 		SPDK_INFOLOG(SPDK_LOG_NVME, "NVMe controller: %s doorbell buffer config enabled\n",
731 			     ctrlr->trid.traddr);
732 	}
733 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
734 			     ctrlr->opts.admin_timeout_ms);
735 }
736 
737 static int
738 nvme_ctrlr_set_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr)
739 {
740 	int rc = 0;
741 	uint64_t prp1, prp2;
742 
743 	if (!ctrlr->cdata.oacs.doorbell_buffer_config) {
744 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
745 				     ctrlr->opts.admin_timeout_ms);
746 		return 0;
747 	}
748 
749 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
750 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
751 				     ctrlr->opts.admin_timeout_ms);
752 		return 0;
753 	}
754 
755 	/* only 1 page size for doorbell buffer */
756 	ctrlr->shadow_doorbell = spdk_dma_zmalloc(ctrlr->page_size, ctrlr->page_size,
757 				 &prp1);
758 	if (ctrlr->shadow_doorbell == NULL) {
759 		rc = -ENOMEM;
760 		goto error;
761 	}
762 
763 	ctrlr->eventidx = spdk_dma_zmalloc(ctrlr->page_size, ctrlr->page_size, &prp2);
764 	if (ctrlr->eventidx == NULL) {
765 		rc = -ENOMEM;
766 		goto error;
767 	}
768 
769 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
770 			     ctrlr->opts.admin_timeout_ms);
771 
772 	rc = nvme_ctrlr_cmd_doorbell_buffer_config(ctrlr, prp1, prp2,
773 			nvme_ctrlr_set_doorbell_buffer_config_done, ctrlr);
774 	if (rc != 0) {
775 		goto error;
776 	}
777 
778 	return 0;
779 
780 error:
781 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
782 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
783 	return rc;
784 }
785 
786 int
787 spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
788 {
789 	int rc = 0;
790 	struct spdk_nvme_qpair	*qpair;
791 	struct nvme_request	*req, *tmp;
792 
793 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
794 
795 	if (ctrlr->is_resetting || ctrlr->is_failed) {
796 		/*
797 		 * Controller is already resetting or has failed.  Return
798 		 *  immediately since there is no need to kick off another
799 		 *  reset in these cases.
800 		 */
801 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
802 		return 0;
803 	}
804 
805 	ctrlr->is_resetting = true;
806 
807 	SPDK_NOTICELOG("resetting controller\n");
808 
809 	/* Free all of the queued abort requests */
810 	STAILQ_FOREACH_SAFE(req, &ctrlr->queued_aborts, stailq, tmp) {
811 		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
812 		nvme_free_request(req);
813 		ctrlr->outstanding_aborts--;
814 	}
815 
816 	/* Disable all queues before disabling the controller hardware. */
817 	nvme_qpair_disable(ctrlr->adminq);
818 	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
819 		nvme_qpair_disable(qpair);
820 	}
821 
822 	/* Doorbell buffer config is invalid during reset */
823 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
824 
825 	/* Set the state back to INIT to cause a full hardware reset. */
826 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
827 
828 	while (ctrlr->state != NVME_CTRLR_STATE_READY) {
829 		if (nvme_ctrlr_process_init(ctrlr) != 0) {
830 			SPDK_ERRLOG("%s: controller reinitialization failed\n", __func__);
831 			nvme_ctrlr_fail(ctrlr, false);
832 			rc = -1;
833 			break;
834 		}
835 	}
836 
837 	if (!ctrlr->is_failed) {
838 		/* Reinitialize qpairs */
839 		TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
840 			if (nvme_transport_ctrlr_reinit_io_qpair(ctrlr, qpair) != 0) {
841 				nvme_ctrlr_fail(ctrlr, false);
842 				rc = -1;
843 			}
844 		}
845 	}
846 
847 	ctrlr->is_resetting = false;
848 
849 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
850 
851 	return rc;
852 }
853 
854 static void
855 nvme_ctrlr_identify_done(void *arg, const struct spdk_nvme_cpl *cpl)
856 {
857 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
858 
859 	if (spdk_nvme_cpl_is_error(cpl)) {
860 		SPDK_ERRLOG("nvme_identify_controller failed!\n");
861 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
862 		return;
863 	}
864 
865 	/*
866 	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
867 	 *  controller supports.
868 	 */
869 	ctrlr->max_xfer_size = nvme_transport_ctrlr_get_max_xfer_size(ctrlr);
870 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "transport max_xfer_size %u\n", ctrlr->max_xfer_size);
871 	if (ctrlr->cdata.mdts > 0) {
872 		ctrlr->max_xfer_size = spdk_min(ctrlr->max_xfer_size,
873 						ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
874 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size);
875 	}
876 
877 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "CNTLID 0x%04" PRIx16 "\n", ctrlr->cdata.cntlid);
878 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
879 		ctrlr->cntlid = ctrlr->cdata.cntlid;
880 	} else {
881 		/*
882 		 * Fabrics controllers should already have CNTLID from the Connect command.
883 		 *
884 		 * If CNTLID from Connect doesn't match CNTLID in the Identify Controller data,
885 		 * trust the one from Connect.
886 		 */
887 		if (ctrlr->cntlid != ctrlr->cdata.cntlid) {
888 			SPDK_DEBUGLOG(SPDK_LOG_NVME,
889 				      "Identify CNTLID 0x%04" PRIx16 " != Connect CNTLID 0x%04" PRIx16 "\n",
890 				      ctrlr->cdata.cntlid, ctrlr->cntlid);
891 		}
892 	}
893 
894 	if (ctrlr->cdata.sgls.supported) {
895 		ctrlr->flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
896 		ctrlr->max_sges = nvme_transport_ctrlr_get_max_sges(ctrlr);
897 	}
898 
899 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
900 			     ctrlr->opts.admin_timeout_ms);
901 }
902 
903 static int
904 nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
905 {
906 	int	rc;
907 
908 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
909 			     ctrlr->opts.admin_timeout_ms);
910 
911 	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
912 				     &ctrlr->cdata, sizeof(ctrlr->cdata),
913 				     nvme_ctrlr_identify_done, ctrlr);
914 	if (rc != 0) {
915 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
916 		return rc;
917 	}
918 
919 	return 0;
920 }
921 
922 int
923 nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
924 {
925 	struct nvme_completion_poll_status	status;
926 	int					rc;
927 	uint32_t				i;
928 	uint32_t				num_pages;
929 	uint32_t				next_nsid = 0;
930 	uint32_t				*new_ns_list = NULL;
931 
932 	if (ctrlr->num_ns == 0) {
933 		spdk_dma_free(ctrlr->active_ns_list);
934 		ctrlr->active_ns_list = NULL;
935 
936 		return 0;
937 	}
938 
939 	/*
940 	 * The allocated size must be a multiple of sizeof(struct spdk_nvme_ns_list)
941 	 */
942 	num_pages = (ctrlr->num_ns * sizeof(new_ns_list[0]) - 1) / sizeof(struct spdk_nvme_ns_list) + 1;
943 	new_ns_list = spdk_dma_zmalloc(num_pages * sizeof(struct spdk_nvme_ns_list), ctrlr->page_size,
944 				       NULL);
945 	if (!new_ns_list) {
946 		SPDK_ERRLOG("Failed to allocate active_ns_list!\n");
947 		return -ENOMEM;
948 	}
949 
950 	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 1, 0) && !(ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
951 		/*
952 		 * Iterate through the pages and fetch each chunk of 1024 namespaces until
953 		 * there are no more active namespaces
954 		 */
955 		for (i = 0; i < num_pages; i++) {
956 			rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST, 0, next_nsid,
957 						     &new_ns_list[1024 * i], sizeof(struct spdk_nvme_ns_list),
958 						     nvme_completion_poll_cb, &status);
959 			if (rc != 0) {
960 				goto fail;
961 			}
962 			if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
963 				SPDK_ERRLOG("nvme_ctrlr_cmd_identify_active_ns_list failed!\n");
964 				rc = -ENXIO;
965 				goto fail;
966 			}
967 			next_nsid = new_ns_list[1024 * i + 1023];
968 			if (next_nsid == 0) {
969 				/*
970 				 * No more active namespaces found, no need to fetch additional chunks
971 				 */
972 				break;
973 			}
974 		}
975 
976 	} else {
977 		/*
978 		 * Controller doesn't support active ns list CNS 0x02 so dummy up
979 		 * an active ns list
980 		 */
981 		for (i = 0; i < ctrlr->num_ns; i++) {
982 			new_ns_list[i] = i + 1;
983 		}
984 	}
985 
986 	/*
987 	 * Now that that the list is properly setup, we can swap it in to the ctrlr and
988 	 * free up the previous one.
989 	 */
990 	spdk_dma_free(ctrlr->active_ns_list);
991 	ctrlr->active_ns_list = new_ns_list;
992 
993 	return 0;
994 fail:
995 	spdk_dma_free(new_ns_list);
996 	return rc;
997 }
998 
999 static void
1000 nvme_ctrlr_identify_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
1001 {
1002 	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
1003 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1004 	uint32_t nsid;
1005 	int rc;
1006 
1007 	if (spdk_nvme_cpl_is_error(cpl)) {
1008 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1009 		return;
1010 	} else {
1011 		nvme_ns_set_identify_data(ns);
1012 	}
1013 
1014 	/* move on to the next active NS */
1015 	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
1016 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1017 	if (ns == NULL) {
1018 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
1019 				     ctrlr->opts.admin_timeout_ms);
1020 		return;
1021 	}
1022 	ns->ctrlr = ctrlr;
1023 	ns->id = nsid;
1024 
1025 	rc = nvme_ctrlr_identify_ns_async(ns);
1026 	if (rc) {
1027 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1028 	}
1029 }
1030 
1031 static int
1032 nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns)
1033 {
1034 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1035 	struct spdk_nvme_ns_data *nsdata;
1036 
1037 	nsdata = &ctrlr->nsdata[ns->id - 1];
1038 
1039 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
1040 			     ctrlr->opts.admin_timeout_ms);
1041 	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id,
1042 				       nsdata, sizeof(*nsdata),
1043 				       nvme_ctrlr_identify_ns_async_done, ns);
1044 }
1045 
1046 static int
1047 nvme_ctrlr_identify_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1048 {
1049 	uint32_t nsid;
1050 	struct spdk_nvme_ns *ns;
1051 	int rc;
1052 
1053 	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
1054 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1055 	if (ns == NULL) {
1056 		/* No active NS, move on to the next state */
1057 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1058 				     ctrlr->opts.admin_timeout_ms);
1059 		return 0;
1060 	}
1061 
1062 	ns->ctrlr = ctrlr;
1063 	ns->id = nsid;
1064 
1065 	rc = nvme_ctrlr_identify_ns_async(ns);
1066 	if (rc) {
1067 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1068 	}
1069 
1070 	return rc;
1071 }
1072 
1073 static void
1074 nvme_ctrlr_identify_id_desc_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
1075 {
1076 	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
1077 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1078 	uint32_t nsid;
1079 	int rc;
1080 
1081 	if (spdk_nvme_cpl_is_error(cpl)) {
1082 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1083 				     ctrlr->opts.admin_timeout_ms);
1084 		return;
1085 	}
1086 
1087 	/* move on to the next active NS */
1088 	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
1089 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1090 	if (ns == NULL) {
1091 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1092 				     ctrlr->opts.admin_timeout_ms);
1093 		return;
1094 	}
1095 
1096 	rc = nvme_ctrlr_identify_id_desc_async(ns);
1097 	if (rc) {
1098 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1099 	}
1100 }
1101 
1102 static int
1103 nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns)
1104 {
1105 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1106 
1107 	memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
1108 
1109 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
1110 			     ctrlr->opts.admin_timeout_ms);
1111 	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST,
1112 				       0, ns->id, ns->id_desc_list, sizeof(ns->id_desc_list),
1113 				       nvme_ctrlr_identify_id_desc_async_done, ns);
1114 }
1115 
1116 static int
1117 nvme_ctrlr_identify_id_desc_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1118 {
1119 	uint32_t nsid;
1120 	struct spdk_nvme_ns *ns;
1121 	int rc;
1122 
1123 	if (ctrlr->vs.raw < SPDK_NVME_VERSION(1, 3, 0) ||
1124 	    (ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
1125 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Version < 1.3; not attempting to retrieve NS ID Descriptor List\n");
1126 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1127 				     ctrlr->opts.admin_timeout_ms);
1128 		return 0;
1129 	}
1130 
1131 	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
1132 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1133 	if (ns == NULL) {
1134 		/* No active NS, move on to the next state */
1135 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1136 				     ctrlr->opts.admin_timeout_ms);
1137 		return 0;
1138 	}
1139 
1140 	rc = nvme_ctrlr_identify_id_desc_async(ns);
1141 	if (rc) {
1142 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1143 	}
1144 
1145 	return rc;
1146 }
1147 
1148 static void
1149 nvme_ctrlr_set_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
1150 {
1151 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1152 
1153 	if (spdk_nvme_cpl_is_error(cpl)) {
1154 		SPDK_ERRLOG("Set Features - Number of Queues failed!\n");
1155 	}
1156 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_GET_NUM_QUEUES,
1157 			     ctrlr->opts.admin_timeout_ms);
1158 }
1159 
1160 static int
1161 nvme_ctrlr_set_num_queues(struct spdk_nvme_ctrlr *ctrlr)
1162 {
1163 	int rc;
1164 
1165 	if (ctrlr->opts.num_io_queues > SPDK_NVME_MAX_IO_QUEUES) {
1166 		SPDK_NOTICELOG("Limiting requested num_io_queues %u to max %d\n",
1167 			       ctrlr->opts.num_io_queues, SPDK_NVME_MAX_IO_QUEUES);
1168 		ctrlr->opts.num_io_queues = SPDK_NVME_MAX_IO_QUEUES;
1169 	} else if (ctrlr->opts.num_io_queues < 1) {
1170 		SPDK_NOTICELOG("Requested num_io_queues 0, increasing to 1\n");
1171 		ctrlr->opts.num_io_queues = 1;
1172 	}
1173 
1174 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
1175 			     ctrlr->opts.admin_timeout_ms);
1176 
1177 	rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->opts.num_io_queues,
1178 					   nvme_ctrlr_set_num_queues_done, ctrlr);
1179 	if (rc != 0) {
1180 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1181 		return rc;
1182 	}
1183 
1184 	return 0;
1185 }
1186 
1187 static void
1188 nvme_ctrlr_get_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
1189 {
1190 	uint32_t cq_allocated, sq_allocated, min_allocated, i;
1191 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1192 
1193 	if (spdk_nvme_cpl_is_error(cpl)) {
1194 		SPDK_ERRLOG("Get Features - Number of Queues failed!\n");
1195 		ctrlr->opts.num_io_queues = 0;
1196 	} else {
1197 		/*
1198 		 * Data in cdw0 is 0-based.
1199 		 * Lower 16-bits indicate number of submission queues allocated.
1200 		 * Upper 16-bits indicate number of completion queues allocated.
1201 		 */
1202 		sq_allocated = (cpl->cdw0 & 0xFFFF) + 1;
1203 		cq_allocated = (cpl->cdw0 >> 16) + 1;
1204 
1205 		/*
1206 		 * For 1:1 queue mapping, set number of allocated queues to be minimum of
1207 		 * submission and completion queues.
1208 		 */
1209 		min_allocated = spdk_min(sq_allocated, cq_allocated);
1210 
1211 		/* Set number of queues to be minimum of requested and actually allocated. */
1212 		ctrlr->opts.num_io_queues = spdk_min(min_allocated, ctrlr->opts.num_io_queues);
1213 	}
1214 
1215 	ctrlr->free_io_qids = spdk_bit_array_create(ctrlr->opts.num_io_queues + 1);
1216 	if (ctrlr->free_io_qids == NULL) {
1217 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1218 		return;
1219 	}
1220 
1221 	/* Initialize list of free I/O queue IDs. QID 0 is the admin queue. */
1222 	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
1223 	for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
1224 		spdk_bit_array_set(ctrlr->free_io_qids, i);
1225 	}
1226 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONSTRUCT_NS,
1227 			     ctrlr->opts.admin_timeout_ms);
1228 }
1229 
1230 static int
1231 nvme_ctrlr_get_num_queues(struct spdk_nvme_ctrlr *ctrlr)
1232 {
1233 	int rc;
1234 
1235 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES,
1236 			     ctrlr->opts.admin_timeout_ms);
1237 
1238 	/* Obtain the number of queues allocated using Get Features. */
1239 	rc = nvme_ctrlr_cmd_get_num_queues(ctrlr, nvme_ctrlr_get_num_queues_done, ctrlr);
1240 	if (rc != 0) {
1241 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1242 		return rc;
1243 	}
1244 
1245 	return 0;
1246 }
1247 
1248 static void
1249 nvme_ctrlr_set_keep_alive_timeout_done(void *arg, const struct spdk_nvme_cpl *cpl)
1250 {
1251 	uint32_t keep_alive_interval_ms;
1252 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1253 
1254 	if (spdk_nvme_cpl_is_error(cpl)) {
1255 		SPDK_ERRLOG("Keep alive timeout Get Feature failed: SC %x SCT %x\n",
1256 			    cpl->status.sc, cpl->status.sct);
1257 		ctrlr->opts.keep_alive_timeout_ms = 0;
1258 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1259 		return;
1260 	}
1261 
1262 	if (ctrlr->opts.keep_alive_timeout_ms != cpl->cdw0) {
1263 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Controller adjusted keep alive timeout to %u ms\n",
1264 			      cpl->cdw0);
1265 	}
1266 
1267 	ctrlr->opts.keep_alive_timeout_ms = cpl->cdw0;
1268 
1269 	keep_alive_interval_ms = ctrlr->opts.keep_alive_timeout_ms / 2;
1270 	if (keep_alive_interval_ms == 0) {
1271 		keep_alive_interval_ms = 1;
1272 	}
1273 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Sending keep alive every %u ms\n", keep_alive_interval_ms);
1274 
1275 	ctrlr->keep_alive_interval_ticks = (keep_alive_interval_ms * spdk_get_ticks_hz()) / UINT64_C(1000);
1276 
1277 	/* Schedule the first Keep Alive to be sent as soon as possible. */
1278 	ctrlr->next_keep_alive_tick = spdk_get_ticks();
1279 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
1280 			     ctrlr->opts.admin_timeout_ms);
1281 }
1282 
1283 static int
1284 nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
1285 {
1286 	int rc;
1287 
1288 	if (ctrlr->opts.keep_alive_timeout_ms == 0) {
1289 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
1290 				     ctrlr->opts.admin_timeout_ms);
1291 		return 0;
1292 	}
1293 
1294 	if (ctrlr->cdata.kas == 0) {
1295 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Controller KAS is 0 - not enabling Keep Alive\n");
1296 		ctrlr->opts.keep_alive_timeout_ms = 0;
1297 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
1298 				     ctrlr->opts.admin_timeout_ms);
1299 		return 0;
1300 	}
1301 
1302 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
1303 			     ctrlr->opts.admin_timeout_ms);
1304 
1305 	/* Retrieve actual keep alive timeout, since the controller may have adjusted it. */
1306 	rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0,
1307 					     nvme_ctrlr_set_keep_alive_timeout_done, ctrlr);
1308 	if (rc != 0) {
1309 		SPDK_ERRLOG("Keep alive timeout Get Feature failed: %d\n", rc);
1310 		ctrlr->opts.keep_alive_timeout_ms = 0;
1311 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1312 		return rc;
1313 	}
1314 
1315 	return 0;
1316 }
1317 
1318 static void
1319 nvme_ctrlr_set_host_id_done(void *arg, const struct spdk_nvme_cpl *cpl)
1320 {
1321 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1322 
1323 	if (spdk_nvme_cpl_is_error(cpl)) {
1324 		/*
1325 		 * Treat Set Features - Host ID failure as non-fatal, since the Host ID feature
1326 		 * is optional.
1327 		 */
1328 		SPDK_WARNLOG("Set Features - Host ID failed: SC 0x%x SCT 0x%x\n",
1329 			     cpl->status.sc, cpl->status.sct);
1330 	} else {
1331 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Set Features - Host ID was successful\n");
1332 	}
1333 
1334 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
1335 }
1336 
1337 static int
1338 nvme_ctrlr_set_host_id(struct spdk_nvme_ctrlr *ctrlr)
1339 {
1340 	uint8_t *host_id;
1341 	uint32_t host_id_size;
1342 	int rc;
1343 
1344 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
1345 		/*
1346 		 * NVMe-oF sends the host ID during Connect and doesn't allow
1347 		 * Set Features - Host Identifier after Connect, so we don't need to do anything here.
1348 		 */
1349 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "NVMe-oF transport - not sending Set Features - Host ID\n");
1350 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
1351 		return 0;
1352 	}
1353 
1354 	if (ctrlr->cdata.ctratt.host_id_exhid_supported) {
1355 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Using 128-bit extended host identifier\n");
1356 		host_id = ctrlr->opts.extended_host_id;
1357 		host_id_size = sizeof(ctrlr->opts.extended_host_id);
1358 	} else {
1359 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Using 64-bit host identifier\n");
1360 		host_id = ctrlr->opts.host_id;
1361 		host_id_size = sizeof(ctrlr->opts.host_id);
1362 	}
1363 
1364 	/* If the user specified an all-zeroes host identifier, don't send the command. */
1365 	if (spdk_mem_all_zero(host_id, host_id_size)) {
1366 		SPDK_DEBUGLOG(SPDK_LOG_NVME,
1367 			      "User did not specify host ID - not sending Set Features - Host ID\n");
1368 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
1369 		return 0;
1370 	}
1371 
1372 	SPDK_LOGDUMP(SPDK_LOG_NVME, "host_id", host_id, host_id_size);
1373 
1374 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
1375 			     ctrlr->opts.admin_timeout_ms);
1376 
1377 	rc = nvme_ctrlr_cmd_set_host_id(ctrlr, host_id, host_id_size, nvme_ctrlr_set_host_id_done, ctrlr);
1378 	if (rc != 0) {
1379 		SPDK_ERRLOG("Set Features - Host ID failed: %d\n", rc);
1380 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1381 		return rc;
1382 	}
1383 
1384 	return 0;
1385 }
1386 
1387 static void
1388 nvme_ctrlr_destruct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1389 {
1390 	if (ctrlr->ns) {
1391 		uint32_t i, num_ns = ctrlr->num_ns;
1392 
1393 		for (i = 0; i < num_ns; i++) {
1394 			nvme_ns_destruct(&ctrlr->ns[i]);
1395 		}
1396 
1397 		spdk_free(ctrlr->ns);
1398 		ctrlr->ns = NULL;
1399 		ctrlr->num_ns = 0;
1400 	}
1401 
1402 	if (ctrlr->nsdata) {
1403 		spdk_free(ctrlr->nsdata);
1404 		ctrlr->nsdata = NULL;
1405 	}
1406 
1407 	spdk_dma_free(ctrlr->active_ns_list);
1408 	ctrlr->active_ns_list = NULL;
1409 }
1410 
1411 static void
1412 nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1413 {
1414 	uint32_t i, nn = ctrlr->cdata.nn;
1415 	struct spdk_nvme_ns_data *nsdata;
1416 
1417 	for (i = 0; i < nn; i++) {
1418 		struct spdk_nvme_ns	*ns = &ctrlr->ns[i];
1419 		uint32_t		nsid = i + 1;
1420 		nsdata			= &ctrlr->nsdata[nsid - 1];
1421 
1422 		if ((nsdata->ncap == 0) && spdk_nvme_ctrlr_is_active_ns(ctrlr, nsid)) {
1423 			if (nvme_ns_construct(ns, nsid, ctrlr) != 0) {
1424 				continue;
1425 			}
1426 		}
1427 
1428 		if (nsdata->ncap && !spdk_nvme_ctrlr_is_active_ns(ctrlr, nsid)) {
1429 			nvme_ns_destruct(ns);
1430 		}
1431 	}
1432 }
1433 
1434 static int
1435 nvme_ctrlr_construct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1436 {
1437 	int rc = 0;
1438 	uint32_t nn = ctrlr->cdata.nn;
1439 	uint64_t phys_addr = 0;
1440 
1441 	/* ctrlr->num_ns may be 0 (startup) or a different number of namespaces (reset),
1442 	 * so check if we need to reallocate.
1443 	 */
1444 	if (nn != ctrlr->num_ns) {
1445 		nvme_ctrlr_destruct_namespaces(ctrlr);
1446 
1447 		if (nn == 0) {
1448 			SPDK_WARNLOG("controller has 0 namespaces\n");
1449 			return 0;
1450 		}
1451 
1452 		ctrlr->ns = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns), 64,
1453 					 &phys_addr, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
1454 		if (ctrlr->ns == NULL) {
1455 			rc = -ENOMEM;
1456 			goto fail;
1457 		}
1458 
1459 		ctrlr->nsdata = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns_data), 64,
1460 					     &phys_addr, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
1461 		if (ctrlr->nsdata == NULL) {
1462 			rc = -ENOMEM;
1463 			goto fail;
1464 		}
1465 
1466 		ctrlr->num_ns = nn;
1467 	}
1468 
1469 	return 0;
1470 
1471 fail:
1472 	nvme_ctrlr_destruct_namespaces(ctrlr);
1473 	return rc;
1474 }
1475 
1476 static void
1477 nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl)
1478 {
1479 	struct nvme_async_event_request	*aer = arg;
1480 	struct spdk_nvme_ctrlr		*ctrlr = aer->ctrlr;
1481 	struct spdk_nvme_ctrlr_process	*active_proc;
1482 	union spdk_nvme_async_event_completion	event;
1483 	int					rc;
1484 
1485 	if (cpl->status.sct == SPDK_NVME_SCT_GENERIC &&
1486 	    cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) {
1487 		/*
1488 		 *  This is simulated when controller is being shut down, to
1489 		 *  effectively abort outstanding asynchronous event requests
1490 		 *  and make sure all memory is freed.  Do not repost the
1491 		 *  request in this case.
1492 		 */
1493 		return;
1494 	}
1495 
1496 	if (cpl->status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
1497 	    cpl->status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED) {
1498 		/*
1499 		 *  SPDK will only send as many AERs as the device says it supports,
1500 		 *  so this status code indicates an out-of-spec device.  Do not repost
1501 		 *  the request in this case.
1502 		 */
1503 		SPDK_ERRLOG("Controller appears out-of-spec for asynchronous event request\n"
1504 			    "handling.  Do not repost this AER.\n");
1505 		return;
1506 	}
1507 
1508 	event.raw = cpl->cdw0;
1509 	if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
1510 	    (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED)) {
1511 		rc = nvme_ctrlr_identify_active_ns(ctrlr);
1512 		if (rc) {
1513 			return;
1514 		}
1515 		nvme_ctrlr_update_namespaces(ctrlr);
1516 	}
1517 
1518 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
1519 	if (active_proc && active_proc->aer_cb_fn) {
1520 		active_proc->aer_cb_fn(active_proc->aer_cb_arg, cpl);
1521 	}
1522 
1523 	/*
1524 	 * Repost another asynchronous event request to replace the one
1525 	 *  that just completed.
1526 	 */
1527 	if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) {
1528 		/*
1529 		 * We can't do anything to recover from a failure here,
1530 		 * so just print a warning message and leave the AER unsubmitted.
1531 		 */
1532 		SPDK_ERRLOG("resubmitting AER failed!\n");
1533 	}
1534 }
1535 
1536 static int
1537 nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
1538 				    struct nvme_async_event_request *aer)
1539 {
1540 	struct nvme_request *req;
1541 
1542 	aer->ctrlr = ctrlr;
1543 	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_async_event_cb, aer);
1544 	aer->req = req;
1545 	if (req == NULL) {
1546 		return -1;
1547 	}
1548 
1549 	req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1550 	return nvme_ctrlr_submit_admin_request(ctrlr, req);
1551 }
1552 
1553 static void
1554 nvme_ctrlr_configure_aer_done(void *arg, const struct spdk_nvme_cpl *cpl)
1555 {
1556 	struct nvme_async_event_request		*aer;
1557 	int					rc;
1558 	uint32_t				i;
1559 	struct spdk_nvme_ctrlr *ctrlr =	(struct spdk_nvme_ctrlr *)arg;
1560 
1561 	if (spdk_nvme_cpl_is_error(cpl)) {
1562 		SPDK_NOTICELOG("nvme_ctrlr_configure_aer failed!\n");
1563 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
1564 				     ctrlr->opts.admin_timeout_ms);
1565 		return;
1566 	}
1567 
1568 	/* aerl is a zero-based value, so we need to add 1 here. */
1569 	ctrlr->num_aers = spdk_min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));
1570 
1571 	for (i = 0; i < ctrlr->num_aers; i++) {
1572 		aer = &ctrlr->aer[i];
1573 		rc = nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
1574 		if (rc) {
1575 			SPDK_ERRLOG("nvme_ctrlr_construct_and_submit_aer failed!\n");
1576 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1577 			return;
1578 		}
1579 	}
1580 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
1581 			     ctrlr->opts.admin_timeout_ms);
1582 }
1583 
1584 static int
1585 nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
1586 {
1587 	union spdk_nvme_feat_async_event_configuration	config;
1588 	int						rc;
1589 
1590 	config.raw = 0;
1591 	config.bits.crit_warn.bits.available_spare = 1;
1592 	config.bits.crit_warn.bits.temperature = 1;
1593 	config.bits.crit_warn.bits.device_reliability = 1;
1594 	config.bits.crit_warn.bits.read_only = 1;
1595 	config.bits.crit_warn.bits.volatile_memory_backup = 1;
1596 
1597 	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 2, 0)) {
1598 		if (ctrlr->cdata.oaes.ns_attribute_notices) {
1599 			config.bits.ns_attr_notice = 1;
1600 		}
1601 		if (ctrlr->cdata.oaes.fw_activation_notices) {
1602 			config.bits.fw_activation_notice = 1;
1603 		}
1604 	}
1605 	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 3, 0) && ctrlr->cdata.lpa.telemetry) {
1606 		config.bits.telemetry_log_notice = 1;
1607 	}
1608 
1609 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
1610 			     ctrlr->opts.admin_timeout_ms);
1611 
1612 	rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, config,
1613 			nvme_ctrlr_configure_aer_done,
1614 			ctrlr);
1615 	if (rc != 0) {
1616 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1617 		return rc;
1618 	}
1619 
1620 	return 0;
1621 }
1622 
1623 struct spdk_nvme_ctrlr_process *
1624 spdk_nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr, pid_t pid)
1625 {
1626 	struct spdk_nvme_ctrlr_process	*active_proc;
1627 
1628 	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
1629 		if (active_proc->pid == pid) {
1630 			return active_proc;
1631 		}
1632 	}
1633 
1634 	return NULL;
1635 }
1636 
1637 struct spdk_nvme_ctrlr_process *
1638 spdk_nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr)
1639 {
1640 	return spdk_nvme_ctrlr_get_process(ctrlr, getpid());
1641 }
1642 
1643 /**
1644  * This function will be called when a process is using the controller.
1645  *  1. For the primary process, it is called when constructing the controller.
1646  *  2. For the secondary process, it is called at probing the controller.
1647  * Note: will check whether the process is already added for the same process.
1648  */
1649 int
1650 nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
1651 {
1652 	struct spdk_nvme_ctrlr_process	*ctrlr_proc;
1653 	pid_t				pid = getpid();
1654 
1655 	/* Check whether the process is already added or not */
1656 	if (spdk_nvme_ctrlr_get_process(ctrlr, pid)) {
1657 		return 0;
1658 	}
1659 
1660 	/* Initialize the per process properties for this ctrlr */
1661 	ctrlr_proc = spdk_zmalloc(sizeof(struct spdk_nvme_ctrlr_process),
1662 				  64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
1663 	if (ctrlr_proc == NULL) {
1664 		SPDK_ERRLOG("failed to allocate memory to track the process props\n");
1665 
1666 		return -1;
1667 	}
1668 
1669 	ctrlr_proc->is_primary = spdk_process_is_primary();
1670 	ctrlr_proc->pid = pid;
1671 	STAILQ_INIT(&ctrlr_proc->active_reqs);
1672 	ctrlr_proc->devhandle = devhandle;
1673 	ctrlr_proc->ref = 0;
1674 	TAILQ_INIT(&ctrlr_proc->allocated_io_qpairs);
1675 
1676 	TAILQ_INSERT_TAIL(&ctrlr->active_procs, ctrlr_proc, tailq);
1677 
1678 	return 0;
1679 }
1680 
1681 /**
1682  * This function will be called when the process detaches the controller.
1683  * Note: the ctrlr_lock must be held when calling this function.
1684  */
1685 static void
1686 nvme_ctrlr_remove_process(struct spdk_nvme_ctrlr *ctrlr,
1687 			  struct spdk_nvme_ctrlr_process *proc)
1688 {
1689 	struct spdk_nvme_qpair	*qpair, *tmp_qpair;
1690 
1691 	assert(STAILQ_EMPTY(&proc->active_reqs));
1692 
1693 	TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
1694 		spdk_nvme_ctrlr_free_io_qpair(qpair);
1695 	}
1696 
1697 	TAILQ_REMOVE(&ctrlr->active_procs, proc, tailq);
1698 
1699 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
1700 		spdk_pci_device_detach(proc->devhandle);
1701 	}
1702 
1703 	spdk_dma_free(proc);
1704 }
1705 
1706 /**
1707  * This function will be called when the process exited unexpectedly
1708  *  in order to free any incomplete nvme request, allocated IO qpairs
1709  *  and allocated memory.
1710  * Note: the ctrlr_lock must be held when calling this function.
1711  */
1712 static void
1713 nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc)
1714 {
1715 	struct nvme_request	*req, *tmp_req;
1716 	struct spdk_nvme_qpair	*qpair, *tmp_qpair;
1717 
1718 	STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) {
1719 		STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq);
1720 
1721 		assert(req->pid == proc->pid);
1722 
1723 		nvme_free_request(req);
1724 	}
1725 
1726 	TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
1727 		TAILQ_REMOVE(&proc->allocated_io_qpairs, qpair, per_process_tailq);
1728 
1729 		/*
1730 		 * The process may have been killed while some qpairs were in their
1731 		 *  completion context.  Clear that flag here to allow these IO
1732 		 *  qpairs to be deleted.
1733 		 */
1734 		qpair->in_completion_context = 0;
1735 
1736 		qpair->no_deletion_notification_needed = 1;
1737 
1738 		spdk_nvme_ctrlr_free_io_qpair(qpair);
1739 	}
1740 
1741 	spdk_dma_free(proc);
1742 }
1743 
1744 /**
1745  * This function will be called when destructing the controller.
1746  *  1. There is no more admin request on this controller.
1747  *  2. Clean up any left resource allocation when its associated process is gone.
1748  */
1749 void
1750 nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
1751 {
1752 	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;
1753 
1754 	/* Free all the processes' properties and make sure no pending admin IOs */
1755 	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
1756 		TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
1757 
1758 		assert(STAILQ_EMPTY(&active_proc->active_reqs));
1759 
1760 		spdk_free(active_proc);
1761 	}
1762 }
1763 
1764 /**
1765  * This function will be called when any other process attaches or
1766  *  detaches the controller in order to cleanup those unexpectedly
1767  *  terminated processes.
1768  * Note: the ctrlr_lock must be held when calling this function.
1769  */
1770 static int
1771 nvme_ctrlr_remove_inactive_proc(struct spdk_nvme_ctrlr *ctrlr)
1772 {
1773 	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;
1774 	int				active_proc_count = 0;
1775 
1776 	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
1777 		if ((kill(active_proc->pid, 0) == -1) && (errno == ESRCH)) {
1778 			SPDK_ERRLOG("process %d terminated unexpected\n", active_proc->pid);
1779 
1780 			TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
1781 
1782 			nvme_ctrlr_cleanup_process(active_proc);
1783 		} else {
1784 			active_proc_count++;
1785 		}
1786 	}
1787 
1788 	return active_proc_count;
1789 }
1790 
1791 void
1792 nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
1793 {
1794 	struct spdk_nvme_ctrlr_process	*active_proc;
1795 
1796 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1797 
1798 	nvme_ctrlr_remove_inactive_proc(ctrlr);
1799 
1800 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
1801 	if (active_proc) {
1802 		active_proc->ref++;
1803 	}
1804 
1805 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1806 }
1807 
1808 void
1809 nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
1810 {
1811 	struct spdk_nvme_ctrlr_process	*active_proc;
1812 	int				proc_count;
1813 
1814 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1815 
1816 	proc_count = nvme_ctrlr_remove_inactive_proc(ctrlr);
1817 
1818 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
1819 	if (active_proc) {
1820 		active_proc->ref--;
1821 		assert(active_proc->ref >= 0);
1822 
1823 		/*
1824 		 * The last active process will be removed at the end of
1825 		 * the destruction of the controller.
1826 		 */
1827 		if (active_proc->ref == 0 && proc_count != 1) {
1828 			nvme_ctrlr_remove_process(ctrlr, active_proc);
1829 		}
1830 	}
1831 
1832 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1833 }
1834 
1835 int
1836 nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
1837 {
1838 	struct spdk_nvme_ctrlr_process	*active_proc;
1839 	int				ref = 0;
1840 
1841 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1842 
1843 	nvme_ctrlr_remove_inactive_proc(ctrlr);
1844 
1845 	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
1846 		ref += active_proc->ref;
1847 	}
1848 
1849 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1850 
1851 	return ref;
1852 }
1853 
1854 /**
1855  *  Get the PCI device handle which is only visible to its associated process.
1856  */
1857 struct spdk_pci_device *
1858 nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr)
1859 {
1860 	struct spdk_nvme_ctrlr_process	*active_proc;
1861 	struct spdk_pci_device		*devhandle = NULL;
1862 
1863 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1864 
1865 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
1866 	if (active_proc) {
1867 		devhandle = active_proc->devhandle;
1868 	}
1869 
1870 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1871 
1872 	return devhandle;
1873 }
1874 
1875 static void
1876 nvme_ctrlr_enable_admin_queue(struct spdk_nvme_ctrlr *ctrlr)
1877 {
1878 	nvme_transport_qpair_reset(ctrlr->adminq);
1879 	nvme_qpair_enable(ctrlr->adminq);
1880 }
1881 
1882 /**
1883  * This function will be called repeatedly during initialization until the controller is ready.
1884  */
1885 int
1886 nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
1887 {
1888 	union spdk_nvme_cc_register cc;
1889 	union spdk_nvme_csts_register csts;
1890 	uint32_t ready_timeout_in_ms;
1891 	int rc = 0;
1892 
1893 	/*
1894 	 * May need to avoid accessing any register on the target controller
1895 	 * for a while. Return early without touching the FSM.
1896 	 * Check sleep_timeout_tsc > 0 for unit test.
1897 	 */
1898 	if ((ctrlr->sleep_timeout_tsc > 0) &&
1899 	    (spdk_get_ticks() <= ctrlr->sleep_timeout_tsc)) {
1900 		return 0;
1901 	}
1902 	ctrlr->sleep_timeout_tsc = 0;
1903 
1904 	if (nvme_ctrlr_get_cc(ctrlr, &cc) ||
1905 	    nvme_ctrlr_get_csts(ctrlr, &csts)) {
1906 		if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
1907 			/* While a device is resetting, it may be unable to service MMIO reads
1908 			 * temporarily. Allow for this case.
1909 			 */
1910 			SPDK_ERRLOG("Get registers failed while waiting for CSTS.RDY == 0\n");
1911 			goto init_timeout;
1912 		}
1913 		SPDK_ERRLOG("Failed to read CC and CSTS in state %d\n", ctrlr->state);
1914 		nvme_ctrlr_fail(ctrlr, false);
1915 		return -EIO;
1916 	}
1917 
1918 	ready_timeout_in_ms = 500 * ctrlr->cap.bits.to;
1919 
1920 	/*
1921 	 * Check if the current initialization step is done or has timed out.
1922 	 */
1923 	switch (ctrlr->state) {
1924 	case NVME_CTRLR_STATE_INIT_DELAY:
1925 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, ready_timeout_in_ms);
1926 		/*
1927 		 * Controller may need some delay before it's enabled.
1928 		 *
1929 		 * This is a workaround for an issue where the PCIe-attached NVMe controller
1930 		 * is not ready after VFIO reset. We delay the initialization rather than the
1931 		 * enabling itself, because this is required only for the very first enabling
1932 		 * - directly after a VFIO reset.
1933 		 *
1934 		 * TODO: Figure out what is actually going wrong.
1935 		 */
1936 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Adding 2 second delay before initializing the controller\n");
1937 		ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2000 * spdk_get_ticks_hz() / 1000);
1938 		break;
1939 
1940 	case NVME_CTRLR_STATE_INIT:
1941 		/* Begin the hardware initialization by making sure the controller is disabled. */
1942 		if (cc.bits.en) {
1943 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1\n");
1944 			/*
1945 			 * Controller is currently enabled. We need to disable it to cause a reset.
1946 			 *
1947 			 * If CC.EN = 1 && CSTS.RDY = 0, the controller is in the process of becoming ready.
1948 			 *  Wait for the ready bit to be 1 before disabling the controller.
1949 			 */
1950 			if (csts.bits.rdy == 0) {
1951 				SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 0 - waiting for reset to complete\n");
1952 				nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1, ready_timeout_in_ms);
1953 				return 0;
1954 			}
1955 
1956 			/* CC.EN = 1 && CSTS.RDY == 1, so we can immediately disable the controller. */
1957 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 0\n");
1958 			cc.bits.en = 0;
1959 			if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
1960 				SPDK_ERRLOG("set_cc() failed\n");
1961 				nvme_ctrlr_fail(ctrlr, false);
1962 				return -EIO;
1963 			}
1964 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
1965 
1966 			/*
1967 			 * Wait 2.5 seconds before accessing PCI registers.
1968 			 * Not using sleep() to avoid blocking other controller's initialization.
1969 			 */
1970 			if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) {
1971 				SPDK_DEBUGLOG(SPDK_LOG_NVME, "Applying quirk: delay 2.5 seconds before reading registers\n");
1972 				ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2500 * spdk_get_ticks_hz() / 1000);
1973 			}
1974 			return 0;
1975 		} else {
1976 			if (csts.bits.rdy == 1) {
1977 				SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 0 && CSTS.RDY = 1 - waiting for shutdown to complete\n");
1978 			}
1979 
1980 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
1981 			return 0;
1982 		}
1983 		break;
1984 
1985 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
1986 		if (csts.bits.rdy == 1) {
1987 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 1 - disabling controller\n");
1988 			/* CC.EN = 1 && CSTS.RDY = 1, so we can set CC.EN = 0 now. */
1989 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 0\n");
1990 			cc.bits.en = 0;
1991 			if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
1992 				SPDK_ERRLOG("set_cc() failed\n");
1993 				nvme_ctrlr_fail(ctrlr, false);
1994 				return -EIO;
1995 			}
1996 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
1997 			return 0;
1998 		}
1999 		break;
2000 
2001 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
2002 		if (csts.bits.rdy == 0) {
2003 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 0 && CSTS.RDY = 0\n");
2004 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE, ready_timeout_in_ms);
2005 			/*
2006 			 * Delay 100us before setting CC.EN = 1.  Some NVMe SSDs miss CC.EN getting
2007 			 *  set to 1 if it is too soon after CSTS.RDY is reported as 0.
2008 			 */
2009 			spdk_delay_us(100);
2010 			return 0;
2011 		}
2012 		break;
2013 
2014 	case NVME_CTRLR_STATE_ENABLE:
2015 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 1\n");
2016 		rc = nvme_ctrlr_enable(ctrlr);
2017 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1, ready_timeout_in_ms);
2018 		return rc;
2019 
2020 	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
2021 		if (csts.bits.rdy == 1) {
2022 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 1 - controller is ready\n");
2023 			/*
2024 			 * The controller has been enabled.
2025 			 *  Perform the rest of initialization serially.
2026 			 */
2027 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE,
2028 					     ctrlr->opts.admin_timeout_ms);
2029 			return 0;
2030 		}
2031 		break;
2032 
2033 	case NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE:
2034 		nvme_ctrlr_enable_admin_queue(ctrlr);
2035 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY,
2036 				     ctrlr->opts.admin_timeout_ms);
2037 		break;
2038 
2039 	case NVME_CTRLR_STATE_IDENTIFY:
2040 		rc = nvme_ctrlr_identify(ctrlr);
2041 		break;
2042 
2043 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
2044 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2045 		break;
2046 
2047 	case NVME_CTRLR_STATE_SET_NUM_QUEUES:
2048 		rc = nvme_ctrlr_set_num_queues(ctrlr);
2049 		break;
2050 
2051 	case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
2052 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2053 		break;
2054 
2055 	case NVME_CTRLR_STATE_GET_NUM_QUEUES:
2056 		rc = nvme_ctrlr_get_num_queues(ctrlr);
2057 		break;
2058 
2059 	case NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES:
2060 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2061 		break;
2062 
2063 	case NVME_CTRLR_STATE_CONSTRUCT_NS:
2064 		rc = nvme_ctrlr_construct_namespaces(ctrlr);
2065 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
2066 				     ctrlr->opts.admin_timeout_ms);
2067 		break;
2068 
2069 	case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
2070 		rc = nvme_ctrlr_identify_active_ns(ctrlr);
2071 		if (rc < 0) {
2072 			nvme_ctrlr_destruct_namespaces(ctrlr);
2073 		}
2074 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS,
2075 				     ctrlr->opts.admin_timeout_ms);
2076 		break;
2077 
2078 	case NVME_CTRLR_STATE_IDENTIFY_NS:
2079 		rc = nvme_ctrlr_identify_namespaces(ctrlr);
2080 		break;
2081 
2082 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
2083 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2084 		break;
2085 
2086 	case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
2087 		rc = nvme_ctrlr_identify_id_desc_namespaces(ctrlr);
2088 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
2089 				     ctrlr->opts.admin_timeout_ms);
2090 		break;
2091 
2092 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
2093 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2094 		break;
2095 
2096 	case NVME_CTRLR_STATE_CONFIGURE_AER:
2097 		rc = nvme_ctrlr_configure_aer(ctrlr);
2098 		break;
2099 
2100 	case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
2101 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2102 		break;
2103 
2104 	case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
2105 		rc = nvme_ctrlr_set_supported_log_pages(ctrlr);
2106 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
2107 				     ctrlr->opts.admin_timeout_ms);
2108 		break;
2109 
2110 	case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
2111 		nvme_ctrlr_set_supported_features(ctrlr);
2112 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
2113 				     ctrlr->opts.admin_timeout_ms);
2114 		break;
2115 
2116 	case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
2117 		rc = nvme_ctrlr_set_doorbell_buffer_config(ctrlr);
2118 		break;
2119 
2120 	case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
2121 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2122 		break;
2123 
2124 	case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
2125 		rc = nvme_ctrlr_set_keep_alive_timeout(ctrlr);
2126 		break;
2127 
2128 	case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
2129 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2130 		break;
2131 
2132 	case NVME_CTRLR_STATE_SET_HOST_ID:
2133 		rc = nvme_ctrlr_set_host_id(ctrlr);
2134 		break;
2135 
2136 	case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
2137 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2138 		break;
2139 
2140 	case NVME_CTRLR_STATE_READY:
2141 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Ctrlr already in ready state\n");
2142 		return 0;
2143 
2144 	case NVME_CTRLR_STATE_ERROR:
2145 		SPDK_ERRLOG("Ctrlr %s is in error state\n", ctrlr->trid.traddr);
2146 		return -1;
2147 
2148 	default:
2149 		assert(0);
2150 		nvme_ctrlr_fail(ctrlr, false);
2151 		return -1;
2152 	}
2153 
2154 init_timeout:
2155 	if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE &&
2156 	    spdk_get_ticks() > ctrlr->state_timeout_tsc) {
2157 		SPDK_ERRLOG("Initialization timed out in state %d\n", ctrlr->state);
2158 		nvme_ctrlr_fail(ctrlr, false);
2159 		return -1;
2160 	}
2161 
2162 	return rc;
2163 }
2164 
2165 int
2166 nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx)
2167 {
2168 	pthread_mutexattr_t attr;
2169 	int rc = 0;
2170 
2171 	if (pthread_mutexattr_init(&attr)) {
2172 		return -1;
2173 	}
2174 	if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) ||
2175 #ifndef __FreeBSD__
2176 	    pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST) ||
2177 	    pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
2178 #endif
2179 	    pthread_mutex_init(mtx, &attr)) {
2180 		rc = -1;
2181 	}
2182 	pthread_mutexattr_destroy(&attr);
2183 	return rc;
2184 }
2185 
2186 int
2187 nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
2188 {
2189 	int rc;
2190 
2191 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
2192 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT_DELAY, NVME_TIMEOUT_INFINITE);
2193 	} else {
2194 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
2195 	}
2196 
2197 	ctrlr->flags = 0;
2198 	ctrlr->free_io_qids = NULL;
2199 	ctrlr->is_resetting = false;
2200 	ctrlr->is_failed = false;
2201 
2202 	TAILQ_INIT(&ctrlr->active_io_qpairs);
2203 	STAILQ_INIT(&ctrlr->queued_aborts);
2204 	ctrlr->outstanding_aborts = 0;
2205 
2206 	rc = nvme_robust_mutex_init_recursive_shared(&ctrlr->ctrlr_lock);
2207 	if (rc != 0) {
2208 		return rc;
2209 	}
2210 
2211 	TAILQ_INIT(&ctrlr->active_procs);
2212 
2213 	return rc;
2214 }
2215 
2216 /* This function should be called once at ctrlr initialization to set up constant properties. */
2217 void
2218 nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_register *cap,
2219 		    const union spdk_nvme_vs_register *vs)
2220 {
2221 	ctrlr->cap = *cap;
2222 	ctrlr->vs = *vs;
2223 
2224 	ctrlr->min_page_size = 1u << (12 + ctrlr->cap.bits.mpsmin);
2225 
2226 	/* For now, always select page_size == min_page_size. */
2227 	ctrlr->page_size = ctrlr->min_page_size;
2228 
2229 	ctrlr->opts.io_queue_size = spdk_max(ctrlr->opts.io_queue_size, SPDK_NVME_IO_QUEUE_MIN_ENTRIES);
2230 	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, MAX_IO_QUEUE_ENTRIES);
2231 	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
2232 
2233 	ctrlr->opts.io_queue_requests = spdk_max(ctrlr->opts.io_queue_requests, ctrlr->opts.io_queue_size);
2234 }
2235 
2236 void
2237 nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr)
2238 {
2239 	pthread_mutex_destroy(&ctrlr->ctrlr_lock);
2240 }
2241 
2242 void
2243 nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
2244 {
2245 	struct spdk_nvme_qpair *qpair, *tmp;
2246 
2247 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Prepare to destruct SSD: %s\n", ctrlr->trid.traddr);
2248 	TAILQ_FOREACH_SAFE(qpair, &ctrlr->active_io_qpairs, tailq, tmp) {
2249 		spdk_nvme_ctrlr_free_io_qpair(qpair);
2250 	}
2251 
2252 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
2253 
2254 	nvme_ctrlr_shutdown(ctrlr);
2255 
2256 	nvme_ctrlr_destruct_namespaces(ctrlr);
2257 
2258 	spdk_bit_array_free(&ctrlr->free_io_qids);
2259 
2260 	nvme_transport_ctrlr_destruct(ctrlr);
2261 }
2262 
2263 int
2264 nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
2265 				struct nvme_request *req)
2266 {
2267 	return nvme_qpair_submit_request(ctrlr->adminq, req);
2268 }
2269 
2270 static void
2271 nvme_keep_alive_completion(void *cb_ctx, const struct spdk_nvme_cpl *cpl)
2272 {
2273 	/* Do nothing */
2274 }
2275 
2276 /*
2277  * Check if we need to send a Keep Alive command.
2278  * Caller must hold ctrlr->ctrlr_lock.
2279  */
2280 static void
2281 nvme_ctrlr_keep_alive(struct spdk_nvme_ctrlr *ctrlr)
2282 {
2283 	uint64_t now;
2284 	struct nvme_request *req;
2285 	struct spdk_nvme_cmd *cmd;
2286 	int rc;
2287 
2288 	now = spdk_get_ticks();
2289 	if (now < ctrlr->next_keep_alive_tick) {
2290 		return;
2291 	}
2292 
2293 	req = nvme_allocate_request_null(ctrlr->adminq, nvme_keep_alive_completion, NULL);
2294 	if (req == NULL) {
2295 		return;
2296 	}
2297 
2298 	cmd = &req->cmd;
2299 	cmd->opc = SPDK_NVME_OPC_KEEP_ALIVE;
2300 
2301 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
2302 	if (rc != 0) {
2303 		SPDK_ERRLOG("Submitting Keep Alive failed\n");
2304 	}
2305 
2306 	ctrlr->next_keep_alive_tick = now + ctrlr->keep_alive_interval_ticks;
2307 }
2308 
2309 int32_t
2310 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
2311 {
2312 	int32_t num_completions;
2313 
2314 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2315 	if (ctrlr->keep_alive_interval_ticks) {
2316 		nvme_ctrlr_keep_alive(ctrlr);
2317 	}
2318 	num_completions = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2319 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2320 
2321 	return num_completions;
2322 }
2323 
2324 const struct spdk_nvme_ctrlr_data *
2325 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
2326 {
2327 	return &ctrlr->cdata;
2328 }
2329 
2330 union spdk_nvme_csts_register spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
2331 {
2332 	union spdk_nvme_csts_register csts;
2333 
2334 	if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
2335 		csts.raw = 0xFFFFFFFFu;
2336 	}
2337 	return csts;
2338 }
2339 
2340 union spdk_nvme_cap_register spdk_nvme_ctrlr_get_regs_cap(struct spdk_nvme_ctrlr *ctrlr)
2341 {
2342 	return ctrlr->cap;
2343 }
2344 
2345 union spdk_nvme_vs_register spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
2346 {
2347 	return ctrlr->vs;
2348 }
2349 
2350 uint32_t
2351 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
2352 {
2353 	return ctrlr->num_ns;
2354 }
2355 
2356 static int32_t
2357 spdk_nvme_ctrlr_active_ns_idx(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2358 {
2359 	int32_t result = -1;
2360 
2361 	if (ctrlr->active_ns_list == NULL || nsid == 0 || nsid > ctrlr->num_ns) {
2362 		return result;
2363 	}
2364 
2365 	int32_t lower = 0;
2366 	int32_t upper = ctrlr->num_ns - 1;
2367 	int32_t mid;
2368 
2369 	while (lower <= upper) {
2370 		mid = lower + (upper - lower) / 2;
2371 		if (ctrlr->active_ns_list[mid] == nsid) {
2372 			result = mid;
2373 			break;
2374 		} else {
2375 			if (ctrlr->active_ns_list[mid] != 0 && ctrlr->active_ns_list[mid] < nsid) {
2376 				lower = mid + 1;
2377 			} else {
2378 				upper = mid - 1;
2379 			}
2380 
2381 		}
2382 	}
2383 
2384 	return result;
2385 }
2386 
2387 bool
2388 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2389 {
2390 	return spdk_nvme_ctrlr_active_ns_idx(ctrlr, nsid) != -1;
2391 }
2392 
2393 uint32_t
2394 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
2395 {
2396 	return ctrlr->active_ns_list ? ctrlr->active_ns_list[0] : 0;
2397 }
2398 
2399 uint32_t
2400 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
2401 {
2402 	int32_t nsid_idx = spdk_nvme_ctrlr_active_ns_idx(ctrlr, prev_nsid);
2403 	if (ctrlr->active_ns_list && nsid_idx >= 0 && (uint32_t)nsid_idx < ctrlr->num_ns - 1) {
2404 		return ctrlr->active_ns_list[nsid_idx + 1];
2405 	}
2406 	return 0;
2407 }
2408 
2409 struct spdk_nvme_ns *
2410 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2411 {
2412 	if (nsid < 1 || nsid > ctrlr->num_ns) {
2413 		return NULL;
2414 	}
2415 
2416 	return &ctrlr->ns[nsid - 1];
2417 }
2418 
2419 struct spdk_pci_device *
2420 spdk_nvme_ctrlr_get_pci_device(struct spdk_nvme_ctrlr *ctrlr)
2421 {
2422 	if (ctrlr == NULL) {
2423 		return NULL;
2424 	}
2425 
2426 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
2427 		return NULL;
2428 	}
2429 
2430 	return nvme_ctrlr_proc_get_devhandle(ctrlr);
2431 }
2432 
2433 uint32_t
2434 spdk_nvme_ctrlr_get_max_xfer_size(const struct spdk_nvme_ctrlr *ctrlr)
2435 {
2436 	return ctrlr->max_xfer_size;
2437 }
2438 
2439 void
2440 spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
2441 				      spdk_nvme_aer_cb aer_cb_fn,
2442 				      void *aer_cb_arg)
2443 {
2444 	struct spdk_nvme_ctrlr_process *active_proc;
2445 
2446 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2447 
2448 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
2449 	if (active_proc) {
2450 		active_proc->aer_cb_fn = aer_cb_fn;
2451 		active_proc->aer_cb_arg = aer_cb_arg;
2452 	}
2453 
2454 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2455 }
2456 
2457 void
2458 spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,
2459 		uint64_t timeout_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)
2460 {
2461 	struct spdk_nvme_ctrlr_process	*active_proc;
2462 
2463 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2464 
2465 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
2466 	if (active_proc) {
2467 		active_proc->timeout_ticks = timeout_us * spdk_get_ticks_hz() / 1000000ULL;
2468 		active_proc->timeout_cb_fn = cb_fn;
2469 		active_proc->timeout_cb_arg = cb_arg;
2470 	}
2471 
2472 	ctrlr->timeout_enabled = true;
2473 
2474 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2475 }
2476 
2477 bool
2478 spdk_nvme_ctrlr_is_log_page_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page)
2479 {
2480 	/* No bounds check necessary, since log_page is uint8_t and log_page_supported has 256 entries */
2481 	SPDK_STATIC_ASSERT(sizeof(ctrlr->log_page_supported) == 256, "log_page_supported size mismatch");
2482 	return ctrlr->log_page_supported[log_page];
2483 }
2484 
2485 bool
2486 spdk_nvme_ctrlr_is_feature_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature_code)
2487 {
2488 	/* No bounds check necessary, since feature_code is uint8_t and feature_supported has 256 entries */
2489 	SPDK_STATIC_ASSERT(sizeof(ctrlr->feature_supported) == 256, "feature_supported size mismatch");
2490 	return ctrlr->feature_supported[feature_code];
2491 }
2492 
2493 int
2494 spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
2495 			  struct spdk_nvme_ctrlr_list *payload)
2496 {
2497 	struct nvme_completion_poll_status	status;
2498 	int					res;
2499 	struct spdk_nvme_ns			*ns;
2500 
2501 	res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload,
2502 				       nvme_completion_poll_cb, &status);
2503 	if (res) {
2504 		return res;
2505 	}
2506 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2507 		SPDK_ERRLOG("spdk_nvme_ctrlr_attach_ns failed!\n");
2508 		return -ENXIO;
2509 	}
2510 
2511 	res = nvme_ctrlr_identify_active_ns(ctrlr);
2512 	if (res) {
2513 		return res;
2514 	}
2515 
2516 	ns = &ctrlr->ns[nsid - 1];
2517 	return nvme_ns_construct(ns, nsid, ctrlr);
2518 }
2519 
2520 int
2521 spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
2522 			  struct spdk_nvme_ctrlr_list *payload)
2523 {
2524 	struct nvme_completion_poll_status	status;
2525 	int					res;
2526 	struct spdk_nvme_ns			*ns;
2527 
2528 	res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload,
2529 				       nvme_completion_poll_cb, &status);
2530 	if (res) {
2531 		return res;
2532 	}
2533 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2534 		SPDK_ERRLOG("spdk_nvme_ctrlr_detach_ns failed!\n");
2535 		return -ENXIO;
2536 	}
2537 
2538 	res = nvme_ctrlr_identify_active_ns(ctrlr);
2539 	if (res) {
2540 		return res;
2541 	}
2542 
2543 	ns = &ctrlr->ns[nsid - 1];
2544 	/* Inactive NS */
2545 	nvme_ns_destruct(ns);
2546 
2547 	return 0;
2548 }
2549 
2550 uint32_t
2551 spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload)
2552 {
2553 	struct nvme_completion_poll_status	status;
2554 	int					res;
2555 	uint32_t				nsid;
2556 	struct spdk_nvme_ns			*ns;
2557 
2558 	res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, &status);
2559 	if (res) {
2560 		return 0;
2561 	}
2562 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2563 		SPDK_ERRLOG("spdk_nvme_ctrlr_create_ns failed!\n");
2564 		return 0;
2565 	}
2566 
2567 	nsid = status.cpl.cdw0;
2568 	ns = &ctrlr->ns[nsid - 1];
2569 	/* Inactive NS */
2570 	res = nvme_ns_construct(ns, nsid, ctrlr);
2571 	if (res) {
2572 		return 0;
2573 	}
2574 
2575 	/* Return the namespace ID that was created */
2576 	return nsid;
2577 }
2578 
2579 int
2580 spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2581 {
2582 	struct nvme_completion_poll_status	status;
2583 	int					res;
2584 	struct spdk_nvme_ns			*ns;
2585 
2586 	res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, &status);
2587 	if (res) {
2588 		return res;
2589 	}
2590 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2591 		SPDK_ERRLOG("spdk_nvme_ctrlr_delete_ns failed!\n");
2592 		return -ENXIO;
2593 	}
2594 
2595 	res = nvme_ctrlr_identify_active_ns(ctrlr);
2596 	if (res) {
2597 		return res;
2598 	}
2599 
2600 	ns = &ctrlr->ns[nsid - 1];
2601 	nvme_ns_destruct(ns);
2602 
2603 	return 0;
2604 }
2605 
2606 int
2607 spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
2608 		       struct spdk_nvme_format *format)
2609 {
2610 	struct nvme_completion_poll_status	status;
2611 	int					res;
2612 
2613 	res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb,
2614 				    &status);
2615 	if (res) {
2616 		return res;
2617 	}
2618 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2619 		SPDK_ERRLOG("spdk_nvme_ctrlr_format failed!\n");
2620 		return -ENXIO;
2621 	}
2622 
2623 	return spdk_nvme_ctrlr_reset(ctrlr);
2624 }
2625 
2626 int
2627 spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, uint32_t size,
2628 				int slot, enum spdk_nvme_fw_commit_action commit_action, struct spdk_nvme_status *completion_status)
2629 {
2630 	struct spdk_nvme_fw_commit		fw_commit;
2631 	struct nvme_completion_poll_status	status;
2632 	int					res;
2633 	unsigned int				size_remaining;
2634 	unsigned int				offset;
2635 	unsigned int				transfer;
2636 	void					*p;
2637 
2638 	if (!completion_status) {
2639 		return -EINVAL;
2640 	}
2641 	memset(completion_status, 0, sizeof(struct spdk_nvme_status));
2642 	if (size % 4) {
2643 		SPDK_ERRLOG("spdk_nvme_ctrlr_update_firmware invalid size!\n");
2644 		return -1;
2645 	}
2646 
2647 	/* Current support only for SPDK_NVME_FW_COMMIT_REPLACE_IMG
2648 	 * and SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG
2649 	 */
2650 	if ((commit_action != SPDK_NVME_FW_COMMIT_REPLACE_IMG) &&
2651 	    (commit_action != SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG)) {
2652 		SPDK_ERRLOG("spdk_nvme_ctrlr_update_firmware invalid command!\n");
2653 		return -1;
2654 	}
2655 
2656 	/* Firmware download */
2657 	size_remaining = size;
2658 	offset = 0;
2659 	p = payload;
2660 
2661 	while (size_remaining > 0) {
2662 		transfer = spdk_min(size_remaining, ctrlr->min_page_size);
2663 
2664 		res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
2665 						       nvme_completion_poll_cb,
2666 						       &status);
2667 		if (res) {
2668 			return res;
2669 		}
2670 
2671 		if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2672 			SPDK_ERRLOG("spdk_nvme_ctrlr_fw_image_download failed!\n");
2673 			return -ENXIO;
2674 		}
2675 		p += transfer;
2676 		offset += transfer;
2677 		size_remaining -= transfer;
2678 	}
2679 
2680 	/* Firmware commit */
2681 	memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
2682 	fw_commit.fs = slot;
2683 	fw_commit.ca = commit_action;
2684 
2685 	res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb,
2686 				       &status);
2687 	if (res) {
2688 		return res;
2689 	}
2690 
2691 	res = spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock);
2692 
2693 	memcpy(completion_status, &status.cpl.status, sizeof(struct spdk_nvme_status));
2694 
2695 	if (res) {
2696 		if (status.cpl.status.sct != SPDK_NVME_SCT_COMMAND_SPECIFIC ||
2697 		    status.cpl.status.sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) {
2698 			if (status.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC  &&
2699 			    status.cpl.status.sc == SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET) {
2700 				SPDK_NOTICELOG("firmware activation requires conventional reset to be performed. !\n");
2701 			} else {
2702 				SPDK_ERRLOG("nvme_ctrlr_cmd_fw_commit failed!\n");
2703 			}
2704 			return -ENXIO;
2705 		}
2706 	}
2707 
2708 	return spdk_nvme_ctrlr_reset(ctrlr);
2709 }
2710 
2711 void *
2712 spdk_nvme_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
2713 {
2714 	void *buf;
2715 
2716 	if (size == 0) {
2717 		return NULL;
2718 	}
2719 
2720 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2721 	buf = nvme_transport_ctrlr_alloc_cmb_io_buffer(ctrlr, size);
2722 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2723 
2724 	return buf;
2725 }
2726 
2727 void
2728 spdk_nvme_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
2729 {
2730 	if (buf && size) {
2731 		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2732 		nvme_transport_ctrlr_free_cmb_io_buffer(ctrlr, buf, size);
2733 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2734 	}
2735 }
2736 
2737 bool
2738 spdk_nvme_ctrlr_is_discovery(struct spdk_nvme_ctrlr *ctrlr)
2739 {
2740 	assert(ctrlr);
2741 
2742 	return !strncmp(ctrlr->trid.subnqn, SPDK_NVMF_DISCOVERY_NQN,
2743 			strlen(SPDK_NVMF_DISCOVERY_NQN));
2744 }
2745 
2746 int
2747 spdk_nvme_ctrlr_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
2748 				 uint16_t spsp, uint8_t nssf, void *payload, size_t size)
2749 {
2750 	struct nvme_completion_poll_status	status;
2751 	int					res;
2752 
2753 	res = nvme_ctrlr_cmd_security_receive(ctrlr, secp, spsp, nssf, payload, size,
2754 					      nvme_completion_poll_cb, &status);
2755 	if (res) {
2756 		return res;
2757 	}
2758 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2759 		SPDK_ERRLOG("spdk_nvme_ctrlr_security_receive failed!\n");
2760 		return -ENXIO;
2761 	}
2762 
2763 	return 0;
2764 }
2765 
2766 int
2767 spdk_nvme_ctrlr_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
2768 			      uint16_t spsp, uint8_t nssf, void *payload, size_t size)
2769 {
2770 	struct nvme_completion_poll_status	status;
2771 	int					res;
2772 
2773 	res = nvme_ctrlr_cmd_security_send(ctrlr, secp, spsp, nssf, payload, size, nvme_completion_poll_cb,
2774 					   &status);
2775 	if (res) {
2776 		return res;
2777 	}
2778 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2779 		SPDK_ERRLOG("spdk_nvme_ctrlr_security_send failed!\n");
2780 		return -ENXIO;
2781 	}
2782 
2783 	return 0;
2784 }
2785