xref: /spdk/lib/nvme/nvme_ctrlr.c (revision b78e763c1af2ace4c19d2932065a43357e3f5d3e)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "nvme_internal.h"
37 
38 #include "spdk/env.h"
39 #include "spdk/string.h"
40 
41 static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
42 		struct nvme_async_event_request *aer);
43 static int nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns);
44 static int nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns);
45 
46 static int
47 nvme_ctrlr_get_cc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc)
48 {
49 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
50 					      &cc->raw);
51 }
52 
53 static int
54 nvme_ctrlr_get_csts(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts)
55 {
56 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts.raw),
57 					      &csts->raw);
58 }
59 
60 int
61 nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
62 {
63 	return nvme_transport_ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
64 					      &cap->raw);
65 }
66 
67 int
68 nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs)
69 {
70 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, vs.raw),
71 					      &vs->raw);
72 }
73 
74 static int
75 nvme_ctrlr_set_cc(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cc_register *cc)
76 {
77 	return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
78 					      cc->raw);
79 }
80 
81 void
82 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
83 {
84 	char host_id_str[SPDK_UUID_STRING_LEN];
85 
86 	assert(opts);
87 
88 	memset(opts, 0, opts_size);
89 
90 #define FIELD_OK(field) \
91 	offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size
92 
93 	if (FIELD_OK(num_io_queues)) {
94 		opts->num_io_queues = DEFAULT_MAX_IO_QUEUES;
95 	}
96 
97 	if (FIELD_OK(use_cmb_sqs)) {
98 		opts->use_cmb_sqs = true;
99 	}
100 
101 	if (FIELD_OK(arb_mechanism)) {
102 		opts->arb_mechanism = SPDK_NVME_CC_AMS_RR;
103 	}
104 
105 	if (FIELD_OK(keep_alive_timeout_ms)) {
106 		opts->keep_alive_timeout_ms = MIN_KEEP_ALIVE_TIMEOUT_IN_MS;
107 	}
108 
109 	if (FIELD_OK(io_queue_size)) {
110 		opts->io_queue_size = DEFAULT_IO_QUEUE_SIZE;
111 	}
112 
113 	if (FIELD_OK(io_queue_requests)) {
114 		opts->io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
115 	}
116 
117 	if (FIELD_OK(host_id)) {
118 		memset(opts->host_id, 0, sizeof(opts->host_id));
119 	}
120 
121 	if (nvme_driver_init() == 0) {
122 		if (FIELD_OK(extended_host_id)) {
123 			memcpy(opts->extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
124 			       sizeof(opts->extended_host_id));
125 		}
126 
127 		if (FIELD_OK(hostnqn)) {
128 			spdk_uuid_fmt_lower(host_id_str, sizeof(host_id_str),
129 					    &g_spdk_nvme_driver->default_extended_host_id);
130 			snprintf(opts->hostnqn, sizeof(opts->hostnqn), "2014-08.org.nvmexpress:uuid:%s", host_id_str);
131 		}
132 	}
133 
134 	if (FIELD_OK(src_addr)) {
135 		memset(opts->src_addr, 0, sizeof(opts->src_addr));
136 	}
137 
138 	if (FIELD_OK(src_svcid)) {
139 		memset(opts->src_svcid, 0, sizeof(opts->src_svcid));
140 	}
141 
142 	if (FIELD_OK(command_set)) {
143 		opts->command_set = SPDK_NVME_CC_CSS_NVM;
144 	}
145 
146 	if (FIELD_OK(admin_timeout_ms)) {
147 		opts->admin_timeout_ms = NVME_MAX_TIMEOUT_PERIOD * 1000;
148 	}
149 
150 	if (FIELD_OK(header_digest)) {
151 		opts->header_digest = false;
152 	}
153 
154 	if (FIELD_OK(data_digest)) {
155 		opts->data_digest = false;
156 	}
157 #undef FIELD_OK
158 }
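
/*
 * Illustrative usage sketch (not part of this file): a caller typically fills
 * a stack-allocated opts structure with the defaults above and then overrides
 * only the fields it cares about.  Passing sizeof(opts) lets an older binary
 * keep working when newer library versions append fields to the structure.
 *
 *     struct spdk_nvme_ctrlr_opts opts;
 *
 *     spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
 *     opts.num_io_queues = 4;
 *     opts.keep_alive_timeout_ms = 10000;
 *     // hand &opts and sizeof(opts) to the attach/connect path of choice
 */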
159 
160 /**
161  * This function will be called when the process allocates the IO qpair.
162  * Note: the ctrlr_lock must be held when calling this function.
163  */
164 static void
165 nvme_ctrlr_proc_add_io_qpair(struct spdk_nvme_qpair *qpair)
166 {
167 	struct spdk_nvme_ctrlr_process	*active_proc;
168 	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
169 
170 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
171 	if (active_proc) {
172 		TAILQ_INSERT_TAIL(&active_proc->allocated_io_qpairs, qpair, per_process_tailq);
173 		qpair->active_proc = active_proc;
174 	}
175 }
176 
177 /**
178  * This function will be called when the process frees the IO qpair.
179  * Note: the ctrlr_lock must be held when calling this function.
180  */
181 static void
182 nvme_ctrlr_proc_remove_io_qpair(struct spdk_nvme_qpair *qpair)
183 {
184 	struct spdk_nvme_ctrlr_process	*active_proc;
185 	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
186 	struct spdk_nvme_qpair          *active_qpair, *tmp_qpair;
187 
188 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
189 	if (!active_proc) {
190 		return;
191 	}
192 
193 	TAILQ_FOREACH_SAFE(active_qpair, &active_proc->allocated_io_qpairs,
194 			   per_process_tailq, tmp_qpair) {
195 		if (active_qpair == qpair) {
196 			TAILQ_REMOVE(&active_proc->allocated_io_qpairs,
197 				     active_qpair, per_process_tailq);
198 
199 			break;
200 		}
201 	}
202 }
203 
204 void
205 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
206 		struct spdk_nvme_io_qpair_opts *opts,
207 		size_t opts_size)
208 {
209 	assert(ctrlr);
210 
211 	assert(opts);
212 
213 	memset(opts, 0, opts_size);
214 
215 #define FIELD_OK(field) \
216 	offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(opts->field) <= opts_size
217 
218 	if (FIELD_OK(qprio)) {
219 		opts->qprio = SPDK_NVME_QPRIO_URGENT;
220 	}
221 
222 	if (FIELD_OK(io_queue_size)) {
223 		opts->io_queue_size = ctrlr->opts.io_queue_size;
224 	}
225 
226 	if (FIELD_OK(io_queue_requests)) {
227 		opts->io_queue_requests = ctrlr->opts.io_queue_requests;
228 	}
229 
230 #undef FIELD_OK
231 }
232 
233 struct spdk_nvme_qpair *
234 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
235 			       const struct spdk_nvme_io_qpair_opts *user_opts,
236 			       size_t opts_size)
237 {
238 	uint32_t				qid;
239 	struct spdk_nvme_qpair			*qpair;
240 	union spdk_nvme_cc_register		cc;
241 	struct spdk_nvme_io_qpair_opts		opts;
242 
243 	if (!ctrlr) {
244 		return NULL;
245 	}
246 
247 	/*
248 	 * Get the default options, then overwrite them with the user-provided options
249 	 * up to opts_size.
250 	 *
251 	 * This allows for extensions of the opts structure without breaking
252 	 * ABI compatibility.
253 	 */
254 	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
255 	if (user_opts) {
256 		memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size));
257 	}
258 
259 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
260 	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
261 		SPDK_ERRLOG("get_cc failed\n");
262 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
263 		return NULL;
264 	}
265 
266 	/* Only the low 2 bits (values 0, 1, 2, 3) of QPRIO are valid. */
267 	if ((opts.qprio & 3) != opts.qprio) {
268 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
269 		return NULL;
270 	}
271 
272 	/*
273 	 * Only the value SPDK_NVME_QPRIO_URGENT (0) is valid for the
274 	 * default round robin arbitration method.
275 	 */
276 	if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts.qprio != SPDK_NVME_QPRIO_URGENT)) {
277 		SPDK_ERRLOG("invalid queue priority for default round robin arbitration method\n");
278 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
279 		return NULL;
280 	}
281 
282 	/*
283 	 * Get the first available I/O queue ID.
284 	 */
285 	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
286 	if (qid > ctrlr->opts.num_io_queues) {
287 		SPDK_ERRLOG("No free I/O queue IDs\n");
288 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
289 		return NULL;
290 	}
291 
292 	qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, &opts);
293 	if (qpair == NULL) {
294 		SPDK_ERRLOG("nvme_transport_ctrlr_create_io_qpair() failed\n");
295 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
296 		return NULL;
297 	}
298 	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
299 	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
300 
301 	nvme_ctrlr_proc_add_io_qpair(qpair);
302 
303 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
304 
305 	if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC) {
306 		spdk_delay_us(100);
307 	}
308 
309 	return qpair;
310 }
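
/*
 * Illustrative usage sketch (not part of this file): typical per-thread I/O
 * qpair setup against an already-attached controller, using only functions
 * defined in this file.  Error handling is elided.
 *
 *     struct spdk_nvme_io_qpair_opts qp_opts;
 *     struct spdk_nvme_qpair *qpair;
 *
 *     spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &qp_opts, sizeof(qp_opts));
 *     qp_opts.io_queue_size = 256;    // request a deeper queue than the controller default
 *     qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &qp_opts, sizeof(qp_opts));
 *     ...                             // submit I/O and poll completions on qpair
 *     spdk_nvme_ctrlr_free_io_qpair(qpair);
 */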
311 
312 int
313 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
314 {
315 	struct spdk_nvme_ctrlr *ctrlr;
316 
317 	if (qpair == NULL) {
318 		return 0;
319 	}
320 
321 	ctrlr = qpair->ctrlr;
322 
323 	if (qpair->in_completion_context) {
324 		/*
325 		 * There are many cases where it is convenient to delete an io qpair in the context
326 		 *  of that qpair's completion routine.  To handle this properly, set a flag here
327 		 *  so that the completion routine will perform an actual delete after the context
328 		 *  unwinds.
329 		 */
330 		qpair->delete_after_completion_context = 1;
331 		return 0;
332 	}
333 
334 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
335 
336 	nvme_ctrlr_proc_remove_io_qpair(qpair);
337 
338 	TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
339 	spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);
340 
341 	if (nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair)) {
342 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
343 		return -1;
344 	}
345 
346 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
347 	return 0;
348 }
349 
350 static void
351 nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
352 		struct spdk_nvme_intel_log_page_directory *log_page_directory)
353 {
354 	if (log_page_directory == NULL) {
355 		return;
356 	}
357 
358 	if (ctrlr->cdata.vid != SPDK_PCI_VID_INTEL) {
359 		return;
360 	}
361 
362 	ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY] = true;
363 
364 	if (log_page_directory->read_latency_log_len ||
365 	    (ctrlr->quirks & NVME_INTEL_QUIRK_READ_LATENCY)) {
366 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] = true;
367 	}
368 	if (log_page_directory->write_latency_log_len ||
369 	    (ctrlr->quirks & NVME_INTEL_QUIRK_WRITE_LATENCY)) {
370 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] = true;
371 	}
372 	if (log_page_directory->temperature_statistics_log_len) {
373 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] = true;
374 	}
375 	if (log_page_directory->smart_log_len) {
376 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_SMART] = true;
377 	}
378 	if (log_page_directory->marketing_description_log_len) {
379 		ctrlr->log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] = true;
380 	}
381 }
382 
383 static int nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
384 {
385 	int rc = 0;
386 	uint64_t phys_addr = 0;
387 	struct nvme_completion_poll_status	status;
388 	struct spdk_nvme_intel_log_page_directory *log_page_directory;
389 
390 	log_page_directory = spdk_zmalloc(sizeof(struct spdk_nvme_intel_log_page_directory),
391 					  64, &phys_addr, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
392 	if (log_page_directory == NULL) {
393 		SPDK_ERRLOG("could not allocate log_page_directory\n");
394 		return -ENXIO;
395 	}
396 
397 	rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
398 					      SPDK_NVME_GLOBAL_NS_TAG, log_page_directory,
399 					      sizeof(struct spdk_nvme_intel_log_page_directory),
400 					      0, nvme_completion_poll_cb, &status);
401 	if (rc != 0) {
402 		spdk_free(log_page_directory);
403 		return rc;
404 	}
405 
406 	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
407 		spdk_free(log_page_directory);
408 		SPDK_WARNLOG("Intel log pages not supported on Intel drive!\n");
409 		return 0;
410 	}
411 
412 	nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, log_page_directory);
413 	spdk_free(log_page_directory);
414 	return 0;
415 }
416 
417 static int
418 nvme_ctrlr_set_supported_log_pages(struct spdk_nvme_ctrlr *ctrlr)
419 {
420 	int	rc = 0;
421 
422 	memset(ctrlr->log_page_supported, 0, sizeof(ctrlr->log_page_supported));
423 	/* Mandatory pages */
424 	ctrlr->log_page_supported[SPDK_NVME_LOG_ERROR] = true;
425 	ctrlr->log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] = true;
426 	ctrlr->log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] = true;
427 	if (ctrlr->cdata.lpa.celp) {
428 		ctrlr->log_page_supported[SPDK_NVME_LOG_COMMAND_EFFECTS_LOG] = true;
429 	}
430 	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL && !(ctrlr->quirks & NVME_INTEL_QUIRK_NO_LOG_PAGES)) {
431 		rc = nvme_ctrlr_set_intel_support_log_pages(ctrlr);
432 	}
433 
434 	return rc;
435 }
436 
437 static void
438 nvme_ctrlr_set_intel_supported_features(struct spdk_nvme_ctrlr *ctrlr)
439 {
440 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_MAX_LBA] = true;
441 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_NATIVE_MAX_LBA] = true;
442 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_POWER_GOVERNOR_SETTING] = true;
443 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_SMBUS_ADDRESS] = true;
444 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LED_PATTERN] = true;
445 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_RESET_TIMED_WORKLOAD_COUNTERS] = true;
446 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING] = true;
447 }
448 
449 static void
450 nvme_ctrlr_set_supported_features(struct spdk_nvme_ctrlr *ctrlr)
451 {
452 	memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported));
453 	/* Mandatory features */
454 	ctrlr->feature_supported[SPDK_NVME_FEAT_ARBITRATION] = true;
455 	ctrlr->feature_supported[SPDK_NVME_FEAT_POWER_MANAGEMENT] = true;
456 	ctrlr->feature_supported[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD] = true;
457 	ctrlr->feature_supported[SPDK_NVME_FEAT_ERROR_RECOVERY] = true;
458 	ctrlr->feature_supported[SPDK_NVME_FEAT_NUMBER_OF_QUEUES] = true;
459 	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_COALESCING] = true;
460 	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION] = true;
461 	ctrlr->feature_supported[SPDK_NVME_FEAT_WRITE_ATOMICITY] = true;
462 	ctrlr->feature_supported[SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION] = true;
463 	/* Optional features */
464 	if (ctrlr->cdata.vwc.present) {
465 		ctrlr->feature_supported[SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE] = true;
466 	}
467 	if (ctrlr->cdata.apsta.supported) {
468 		ctrlr->feature_supported[SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION] = true;
469 	}
470 	if (ctrlr->cdata.hmpre) {
471 		ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_MEM_BUFFER] = true;
472 	}
473 	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) {
474 		nvme_ctrlr_set_intel_supported_features(ctrlr);
475 	}
476 }
477 
478 void
479 nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
480 {
481 	/*
482 	 * Set the flag here and leave the actual failing of the qpairs to
483 	 * spdk_nvme_qpair_process_completions().
484 	 */
485 	if (hot_remove) {
486 		ctrlr->is_removed = true;
487 	}
488 	ctrlr->is_failed = true;
489 	SPDK_ERRLOG("ctrlr %s in failed state.\n", ctrlr->trid.traddr);
490 }
491 
492 static void
493 nvme_ctrlr_shutdown(struct spdk_nvme_ctrlr *ctrlr)
494 {
495 	union spdk_nvme_cc_register	cc;
496 	union spdk_nvme_csts_register	csts;
497 	uint32_t			ms_waited = 0;
498 	uint32_t			shutdown_timeout_ms;
499 
500 	if (ctrlr->is_removed) {
501 		return;
502 	}
503 
504 	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
505 		SPDK_ERRLOG("get_cc() failed\n");
506 		return;
507 	}
508 
509 	cc.bits.shn = SPDK_NVME_SHN_NORMAL;
510 
511 	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
512 		SPDK_ERRLOG("set_cc() failed\n");
513 		return;
514 	}
515 
516 	/*
517 	 * The NVMe specification defines RTD3E to be the time between
518 	 *  setting SHN = 1 and the controller setting SHST = 10b.
519 	 * If the device doesn't report RTD3 entry latency, or if it
520 	 *  reports RTD3 entry latency less than 10 seconds, pick
521 	 *  10 seconds as a reasonable amount of time to
522 	 *  wait before proceeding.
523 	 */
524 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "RTD3E = %" PRIu32 " us\n", ctrlr->cdata.rtd3e);
525 	shutdown_timeout_ms = (ctrlr->cdata.rtd3e + 999) / 1000;
526 	shutdown_timeout_ms = spdk_max(shutdown_timeout_ms, 10000);
527 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "shutdown timeout = %" PRIu32 " ms\n", shutdown_timeout_ms);
528 
529 	do {
530 		if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
531 			SPDK_ERRLOG("get_csts() failed\n");
532 			return;
533 		}
534 
535 		if (csts.bits.shst == SPDK_NVME_SHST_COMPLETE) {
536 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "shutdown complete in %u milliseconds\n",
537 				      ms_waited);
538 			ctrlr->is_shutdown = true;
539 			return;
540 		}
541 
542 		nvme_delay(1000);
543 		ms_waited++;
544 	} while (ms_waited < shutdown_timeout_ms);
545 
546 	SPDK_ERRLOG("did not shutdown within %u milliseconds\n", shutdown_timeout_ms);
547 }
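
/*
 * Worked example of the timeout math above (hypothetical RTD3E values).
 * RTD3E is reported in microseconds, (rtd3e + 999) / 1000 rounds up to
 * milliseconds, and spdk_max() enforces the 10 second floor:
 *
 *     rtd3e =          0 us  ->      0 ms  ->  spdk_max(0, 10000)     = 10000 ms
 *     rtd3e =  2,000,000 us  ->   2000 ms  ->  spdk_max(2000, 10000)  = 10000 ms
 *     rtd3e = 30,000,000 us  ->  30000 ms  ->  spdk_max(30000, 10000) = 30000 ms
 */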
548 
549 static int
550 nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
551 {
552 	union spdk_nvme_cc_register	cc;
553 	int				rc;
554 
555 	rc = nvme_transport_ctrlr_enable(ctrlr);
556 	if (rc != 0) {
557 		SPDK_ERRLOG("transport ctrlr_enable failed\n");
558 		return rc;
559 	}
560 
561 	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
562 		SPDK_ERRLOG("get_cc() failed\n");
563 		return -EIO;
564 	}
565 
566 	if (cc.bits.en != 0) {
567 		SPDK_ERRLOG("%s called with CC.EN = 1\n", __func__);
568 		return -EINVAL;
569 	}
570 
571 	cc.bits.en = 1;
572 	cc.bits.css = 0;
573 	cc.bits.shn = 0;
574 	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
575 	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
576 
577 	/* Page size is 2 ^ (12 + mps). */
578 	cc.bits.mps = spdk_u32log2(ctrlr->page_size) - 12;
579 
580 	if (ctrlr->cap.bits.css == 0) {
581 		SPDK_INFOLOG(SPDK_LOG_NVME,
582 			     "Drive reports no command sets supported. Assuming NVM is supported.\n");
583 		ctrlr->cap.bits.css = SPDK_NVME_CAP_CSS_NVM;
584 	}
585 
586 	if (!(ctrlr->cap.bits.css & (1u << ctrlr->opts.command_set))) {
587 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Requested I/O command set %u but supported mask is 0x%x\n",
588 			      ctrlr->opts.command_set, ctrlr->cap.bits.css);
589 		return -EINVAL;
590 	}
591 
592 	cc.bits.css = ctrlr->opts.command_set;
593 
594 	switch (ctrlr->opts.arb_mechanism) {
595 	case SPDK_NVME_CC_AMS_RR:
596 		break;
597 	case SPDK_NVME_CC_AMS_WRR:
598 		if (SPDK_NVME_CAP_AMS_WRR & ctrlr->cap.bits.ams) {
599 			break;
600 		}
601 		return -EINVAL;
602 	case SPDK_NVME_CC_AMS_VS:
603 		if (SPDK_NVME_CAP_AMS_VS & ctrlr->cap.bits.ams) {
604 			break;
605 		}
606 		return -EINVAL;
607 	default:
608 		return -EINVAL;
609 	}
610 
611 	cc.bits.ams = ctrlr->opts.arb_mechanism;
612 
613 	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
614 		SPDK_ERRLOG("set_cc() failed\n");
615 		return -EIO;
616 	}
617 
618 	return 0;
619 }
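
/*
 * A quick sketch of the CC field encodings chosen above, assuming a 4 KiB
 * host page size:
 *
 *     iosqes = 6  ->  SQ entry size = 2^6 = 64 bytes
 *     iocqes = 4  ->  CQ entry size = 2^4 = 16 bytes
 *     mps    = spdk_u32log2(4096) - 12 = 0  ->  memory page size = 2^(12+0) = 4096 bytes
 */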
620 
621 #ifdef DEBUG
622 static const char *
623 nvme_ctrlr_state_string(enum nvme_ctrlr_state state)
624 {
625 	switch (state) {
626 	case NVME_CTRLR_STATE_INIT_DELAY:
627 		return "delay init";
628 	case NVME_CTRLR_STATE_INIT:
629 		return "init";
630 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
631 		return "disable and wait for CSTS.RDY = 1";
632 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
633 		return "disable and wait for CSTS.RDY = 0";
634 	case NVME_CTRLR_STATE_ENABLE:
635 		return "enable controller by writing CC.EN = 1";
636 	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
637 		return "wait for CSTS.RDY = 1";
638 	case NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE:
639 		return "enable admin queue";
640 	case NVME_CTRLR_STATE_IDENTIFY:
641 		return "identify controller";
642 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
643 		return "wait for identify controller";
644 	case NVME_CTRLR_STATE_SET_NUM_QUEUES:
645 		return "set number of queues";
646 	case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
647 		return "wait for set number of queues";
648 	case NVME_CTRLR_STATE_GET_NUM_QUEUES:
649 		return "get number of queues";
650 	case NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES:
651 		return "wait for get number of queues";
652 	case NVME_CTRLR_STATE_CONSTRUCT_NS:
653 		return "construct namespaces";
654 	case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
655 		return "identify active ns";
656 	case NVME_CTRLR_STATE_IDENTIFY_NS:
657 		return "identify ns";
658 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
659 		return "wait for identify ns";
660 	case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
661 		return "identify namespace id descriptors";
662 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
663 		return "wait for identify namespace id descriptors";
664 	case NVME_CTRLR_STATE_CONFIGURE_AER:
665 		return "configure AER";
666 	case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
667 		return "wait for configure aer";
668 	case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
669 		return "set supported log pages";
670 	case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
671 		return "set supported features";
672 	case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
673 		return "set doorbell buffer config";
674 	case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
675 		return "wait for doorbell buffer config";
676 	case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
677 		return "set keep alive timeout";
678 	case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
679 		return "wait for set keep alive timeout";
680 	case NVME_CTRLR_STATE_SET_HOST_ID:
681 		return "set host ID";
682 	case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
683 		return "wait for set host ID";
684 	case NVME_CTRLR_STATE_READY:
685 		return "ready";
686 	case NVME_CTRLR_STATE_ERROR:
687 		return "error";
688 	}
689 	return "unknown";
690 }
691 #endif /* DEBUG */
692 
693 static void
694 nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
695 		     uint64_t timeout_in_ms)
696 {
697 	ctrlr->state = state;
698 	if (timeout_in_ms == 0) {
699 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "setting state to %s (no timeout)\n",
700 			      nvme_ctrlr_state_string(ctrlr->state));
701 		ctrlr->state_timeout_tsc = NVME_TIMEOUT_INFINITE;
702 	} else {
703 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "setting state to %s (timeout %" PRIu64 " ms)\n",
704 			      nvme_ctrlr_state_string(ctrlr->state), timeout_in_ms);
705 		ctrlr->state_timeout_tsc = spdk_get_ticks() + (timeout_in_ms * spdk_get_ticks_hz()) / 1000;
706 	}
707 }
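
/*
 * Worked example of the deadline math above (hypothetical tick rate): with
 * spdk_get_ticks_hz() == 2,000,000,000 and timeout_in_ms == 500,
 * state_timeout_tsc = spdk_get_ticks() + (500 * 2,000,000,000) / 1000,
 * i.e. one billion ticks from now.
 */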
708 
709 static void
710 nvme_ctrlr_free_doorbell_buffer(struct spdk_nvme_ctrlr *ctrlr)
711 {
712 	if (ctrlr->shadow_doorbell) {
713 		spdk_dma_free(ctrlr->shadow_doorbell);
714 		ctrlr->shadow_doorbell = NULL;
715 	}
716 
717 	if (ctrlr->eventidx) {
718 		spdk_dma_free(ctrlr->eventidx);
719 		ctrlr->eventidx = NULL;
720 	}
721 }
722 
723 static void
724 nvme_ctrlr_set_doorbell_buffer_config_done(void *arg, const struct spdk_nvme_cpl *cpl)
725 {
726 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
727 
728 	if (spdk_nvme_cpl_is_error(cpl)) {
729 		SPDK_WARNLOG("Doorbell buffer config failed\n");
730 	} else {
731 		SPDK_INFOLOG(SPDK_LOG_NVME, "NVMe controller: %s doorbell buffer config enabled\n",
732 			     ctrlr->trid.traddr);
733 	}
734 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
735 			     ctrlr->opts.admin_timeout_ms);
736 }
737 
738 static int
739 nvme_ctrlr_set_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr)
740 {
741 	int rc = 0;
742 	uint64_t prp1, prp2;
743 
744 	if (!ctrlr->cdata.oacs.doorbell_buffer_config) {
745 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
746 				     ctrlr->opts.admin_timeout_ms);
747 		return 0;
748 	}
749 
750 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
751 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
752 				     ctrlr->opts.admin_timeout_ms);
753 		return 0;
754 	}
755 
756 	/* The shadow doorbell and eventidx buffers each occupy exactly one page. */
757 	ctrlr->shadow_doorbell = spdk_dma_zmalloc(ctrlr->page_size, ctrlr->page_size,
758 				 &prp1);
759 	if (ctrlr->shadow_doorbell == NULL) {
760 		rc = -ENOMEM;
761 		goto error;
762 	}
763 
764 	ctrlr->eventidx = spdk_dma_zmalloc(ctrlr->page_size, ctrlr->page_size, &prp2);
765 	if (ctrlr->eventidx == NULL) {
766 		rc = -ENOMEM;
767 		goto error;
768 	}
769 
770 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
771 			     ctrlr->opts.admin_timeout_ms);
772 
773 	rc = nvme_ctrlr_cmd_doorbell_buffer_config(ctrlr, prp1, prp2,
774 			nvme_ctrlr_set_doorbell_buffer_config_done, ctrlr);
775 	if (rc != 0) {
776 		goto error;
777 	}
778 
779 	return 0;
780 
781 error:
782 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
783 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
784 	return rc;
785 }
786 
787 int
788 spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
789 {
790 	int rc = 0;
791 	struct spdk_nvme_qpair	*qpair;
792 	struct nvme_request	*req, *tmp;
793 
794 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
795 
796 	if (ctrlr->is_resetting || ctrlr->is_failed) {
797 		/*
798 		 * Controller is already resetting or has failed.  Return
799 		 *  immediately since there is no need to kick off another
800 		 *  reset in these cases.
801 		 */
802 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
803 		return 0;
804 	}
805 
806 	ctrlr->is_resetting = true;
807 
808 	SPDK_NOTICELOG("resetting controller\n");
809 
810 	/* Free all of the queued abort requests */
811 	STAILQ_FOREACH_SAFE(req, &ctrlr->queued_aborts, stailq, tmp) {
812 		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
813 		nvme_free_request(req);
814 		ctrlr->outstanding_aborts--;
815 	}
816 
817 	/* Disable all queues before disabling the controller hardware. */
818 	nvme_qpair_disable(ctrlr->adminq);
819 	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
820 		nvme_qpair_disable(qpair);
821 	}
822 
823 	/* Doorbell buffer config is invalid during reset */
824 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
825 
826 	/* Set the state back to INIT to cause a full hardware reset. */
827 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
828 
829 	while (ctrlr->state != NVME_CTRLR_STATE_READY) {
830 		if (nvme_ctrlr_process_init(ctrlr) != 0) {
831 			SPDK_ERRLOG("%s: controller reinitialization failed\n", __func__);
832 			nvme_ctrlr_fail(ctrlr, false);
833 			rc = -1;
834 			break;
835 		}
836 	}
837 
838 	if (!ctrlr->is_failed) {
839 		/* Reinitialize qpairs */
840 		TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
841 			if (nvme_transport_ctrlr_reinit_io_qpair(ctrlr, qpair) != 0) {
842 				nvme_ctrlr_fail(ctrlr, false);
843 				rc = -1;
844 			}
845 		}
846 	}
847 
848 	ctrlr->is_resetting = false;
849 
850 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
851 
852 	return rc;
853 }
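
/*
 * Illustrative usage sketch (not part of this file): a host application
 * typically calls this from its error-recovery path.  Existing I/O qpair
 * handles remain valid because they are reinitialized in place above rather
 * than reallocated.
 *
 *     if (spdk_nvme_ctrlr_reset(ctrlr) != 0) {
 *             // reset failed; the controller is now marked failed and
 *             // should be detached by the application
 *     }
 */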
854 
855 static void
856 nvme_ctrlr_identify_done(void *arg, const struct spdk_nvme_cpl *cpl)
857 {
858 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
859 
860 	if (spdk_nvme_cpl_is_error(cpl)) {
861 		SPDK_ERRLOG("nvme_identify_controller failed!\n");
862 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
863 		return;
864 	}
865 
866 	/*
867 	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
868 	 *  controller supports.
869 	 */
870 	ctrlr->max_xfer_size = nvme_transport_ctrlr_get_max_xfer_size(ctrlr);
871 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "transport max_xfer_size %u\n", ctrlr->max_xfer_size);
872 	if (ctrlr->cdata.mdts > 0) {
873 		ctrlr->max_xfer_size = spdk_min(ctrlr->max_xfer_size,
874 						ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
875 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size);
876 	}
877 
878 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "CNTLID 0x%04" PRIx16 "\n", ctrlr->cdata.cntlid);
879 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
880 		ctrlr->cntlid = ctrlr->cdata.cntlid;
881 	} else {
882 		/*
883 		 * Fabrics controllers should already have CNTLID from the Connect command.
884 		 *
885 		 * If CNTLID from Connect doesn't match CNTLID in the Identify Controller data,
886 		 * trust the one from Connect.
887 		 */
888 		if (ctrlr->cntlid != ctrlr->cdata.cntlid) {
889 			SPDK_DEBUGLOG(SPDK_LOG_NVME,
890 				      "Identify CNTLID 0x%04" PRIx16 " != Connect CNTLID 0x%04" PRIx16 "\n",
891 				      ctrlr->cdata.cntlid, ctrlr->cntlid);
892 		}
893 	}
894 
895 	if (ctrlr->cdata.sgls.supported) {
896 		ctrlr->flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
897 		ctrlr->max_sges = nvme_transport_ctrlr_get_max_sges(ctrlr);
898 	}
899 
900 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
901 			     ctrlr->opts.admin_timeout_ms);
902 }
903 
904 static int
905 nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
906 {
907 	int	rc;
908 
909 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
910 			     ctrlr->opts.admin_timeout_ms);
911 
912 	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
913 				     &ctrlr->cdata, sizeof(ctrlr->cdata),
914 				     nvme_ctrlr_identify_done, ctrlr);
915 	if (rc != 0) {
916 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
917 		return rc;
918 	}
919 
920 	return 0;
921 }
922 
923 int
924 nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
925 {
926 	struct nvme_completion_poll_status	status;
927 	int					rc;
928 	uint32_t				i;
929 	uint32_t				num_pages;
930 	uint32_t				next_nsid = 0;
931 	uint32_t				*new_ns_list = NULL;
932 
933 	if (ctrlr->num_ns == 0) {
934 		spdk_dma_free(ctrlr->active_ns_list);
935 		ctrlr->active_ns_list = NULL;
936 
937 		return 0;
938 	}
939 
940 	/*
941 	 * The allocated size must be a multiple of sizeof(struct spdk_nvme_ns_list)
942 	 */
943 	num_pages = (ctrlr->num_ns * sizeof(new_ns_list[0]) - 1) / sizeof(struct spdk_nvme_ns_list) + 1;
944 	new_ns_list = spdk_dma_zmalloc(num_pages * sizeof(struct spdk_nvme_ns_list), ctrlr->page_size,
945 				       NULL);
946 	if (!new_ns_list) {
947 		SPDK_ERRLOG("Failed to allocate active_ns_list!\n");
948 		return -ENOMEM;
949 	}
950 
951 	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 1, 0) && !(ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
952 		/*
953 		 * Iterate through the pages and fetch each chunk of 1024 namespaces until
954 		 * there are no more active namespaces
955 		 */
956 		for (i = 0; i < num_pages; i++) {
957 			rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST, 0, next_nsid,
958 						     &new_ns_list[1024 * i], sizeof(struct spdk_nvme_ns_list),
959 						     nvme_completion_poll_cb, &status);
960 			if (rc != 0) {
961 				goto fail;
962 			}
963 			if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
964 				SPDK_ERRLOG("nvme_ctrlr_cmd_identify_active_ns_list failed!\n");
965 				rc = -ENXIO;
966 				goto fail;
967 			}
968 			next_nsid = new_ns_list[1024 * i + 1023];
969 			if (next_nsid == 0) {
970 				/*
971 				 * No more active namespaces found, no need to fetch additional chunks
972 				 */
973 				break;
974 			}
975 		}
976 
977 	} else {
978 		/*
979 		 * Controller doesn't support active ns list CNS 0x02 so dummy up
980 		 * an active ns list
981 		 */
982 		for (i = 0; i < ctrlr->num_ns; i++) {
983 			new_ns_list[i] = i + 1;
984 		}
985 	}
986 
987 	 * Now that the list is properly set up, we can swap it into the ctrlr and
988 	 * Now that that the list is properly setup, we can swap it in to the ctrlr and
989 	 * free up the previous one.
990 	 */
991 	spdk_dma_free(ctrlr->active_ns_list);
992 	ctrlr->active_ns_list = new_ns_list;
993 
994 	return 0;
995 fail:
996 	spdk_dma_free(new_ns_list);
997 	return rc;
998 }
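
/*
 * Worked example of the sizing above: struct spdk_nvme_ns_list holds 1024
 * 32-bit NSIDs (4096 bytes), so
 *
 *     num_ns = 1     ->  num_pages = (1    * 4 - 1) / 4096 + 1 = 1
 *     num_ns = 1024  ->  num_pages = (1024 * 4 - 1) / 4096 + 1 = 1
 *     num_ns = 1025  ->  num_pages = (1025 * 4 - 1) / 4096 + 1 = 2
 *
 * Each Identify (CNS 0x02) round trip fills one such page, resuming after the
 * last NSID returned by the previous page.
 */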
999 
1000 static void
1001 nvme_ctrlr_identify_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
1002 {
1003 	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
1004 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1005 	uint32_t nsid;
1006 	int rc;
1007 
1008 	if (spdk_nvme_cpl_is_error(cpl)) {
1009 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1010 		return;
1011 	} else {
1012 		nvme_ns_set_identify_data(ns);
1013 	}
1014 
1015 	/* move on to the next active NS */
1016 	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
1017 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1018 	if (ns == NULL) {
1019 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
1020 				     ctrlr->opts.admin_timeout_ms);
1021 		return;
1022 	}
1023 	ns->ctrlr = ctrlr;
1024 	ns->id = nsid;
1025 
1026 	rc = nvme_ctrlr_identify_ns_async(ns);
1027 	if (rc) {
1028 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1029 	}
1030 }
1031 
1032 static int
1033 nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns)
1034 {
1035 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1036 	struct spdk_nvme_ns_data *nsdata;
1037 
1038 	nsdata = &ctrlr->nsdata[ns->id - 1];
1039 
1040 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
1041 			     ctrlr->opts.admin_timeout_ms);
1042 	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id,
1043 				       nsdata, sizeof(*nsdata),
1044 				       nvme_ctrlr_identify_ns_async_done, ns);
1045 }
1046 
1047 static int
1048 nvme_ctrlr_identify_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1049 {
1050 	uint32_t nsid;
1051 	struct spdk_nvme_ns *ns;
1052 	int rc;
1053 
1054 	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
1055 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1056 	if (ns == NULL) {
1057 		/* No active NS, move on to the next state */
1058 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1059 				     ctrlr->opts.admin_timeout_ms);
1060 		return 0;
1061 	}
1062 
1063 	ns->ctrlr = ctrlr;
1064 	ns->id = nsid;
1065 
1066 	rc = nvme_ctrlr_identify_ns_async(ns);
1067 	if (rc) {
1068 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1069 	}
1070 
1071 	return rc;
1072 }
1073 
1074 static void
1075 nvme_ctrlr_identify_id_desc_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
1076 {
1077 	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
1078 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1079 	uint32_t nsid;
1080 	int rc;
1081 
1082 	if (spdk_nvme_cpl_is_error(cpl)) {
1083 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1084 				     ctrlr->opts.admin_timeout_ms);
1085 		return;
1086 	}
1087 
1088 	/* move on to the next active NS */
1089 	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
1090 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1091 	if (ns == NULL) {
1092 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1093 				     ctrlr->opts.admin_timeout_ms);
1094 		return;
1095 	}
1096 
1097 	rc = nvme_ctrlr_identify_id_desc_async(ns);
1098 	if (rc) {
1099 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1100 	}
1101 }
1102 
1103 static int
1104 nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns)
1105 {
1106 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1107 
1108 	memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
1109 
1110 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
1111 			     ctrlr->opts.admin_timeout_ms);
1112 	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST,
1113 				       0, ns->id, ns->id_desc_list, sizeof(ns->id_desc_list),
1114 				       nvme_ctrlr_identify_id_desc_async_done, ns);
1115 }
1116 
1117 static int
1118 nvme_ctrlr_identify_id_desc_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1119 {
1120 	uint32_t nsid;
1121 	struct spdk_nvme_ns *ns;
1122 	int rc;
1123 
1124 	if (ctrlr->vs.raw < SPDK_NVME_VERSION(1, 3, 0) ||
1125 	    (ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
1126 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Version < 1.3; not attempting to retrieve NS ID Descriptor List\n");
1127 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1128 				     ctrlr->opts.admin_timeout_ms);
1129 		return 0;
1130 	}
1131 
1132 	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
1133 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1134 	if (ns == NULL) {
1135 		/* No active NS, move on to the next state */
1136 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1137 				     ctrlr->opts.admin_timeout_ms);
1138 		return 0;
1139 	}
1140 
1141 	rc = nvme_ctrlr_identify_id_desc_async(ns);
1142 	if (rc) {
1143 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1144 	}
1145 
1146 	return rc;
1147 }
1148 
1149 static void
1150 nvme_ctrlr_set_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
1151 {
1152 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1153 
1154 	if (spdk_nvme_cpl_is_error(cpl)) {
1155 		SPDK_ERRLOG("Set Features - Number of Queues failed!\n");
1156 	}
1157 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_GET_NUM_QUEUES,
1158 			     ctrlr->opts.admin_timeout_ms);
1159 }
1160 
1161 static int
1162 nvme_ctrlr_set_num_queues(struct spdk_nvme_ctrlr *ctrlr)
1163 {
1164 	int rc;
1165 
1166 	if (ctrlr->opts.num_io_queues > SPDK_NVME_MAX_IO_QUEUES) {
1167 		SPDK_NOTICELOG("Limiting requested num_io_queues %u to max %d\n",
1168 			       ctrlr->opts.num_io_queues, SPDK_NVME_MAX_IO_QUEUES);
1169 		ctrlr->opts.num_io_queues = SPDK_NVME_MAX_IO_QUEUES;
1170 	} else if (ctrlr->opts.num_io_queues < 1) {
1171 		SPDK_NOTICELOG("Requested num_io_queues 0, increasing to 1\n");
1172 		ctrlr->opts.num_io_queues = 1;
1173 	}
1174 
1175 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
1176 			     ctrlr->opts.admin_timeout_ms);
1177 
1178 	rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->opts.num_io_queues,
1179 					   nvme_ctrlr_set_num_queues_done, ctrlr);
1180 	if (rc != 0) {
1181 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1182 		return rc;
1183 	}
1184 
1185 	return 0;
1186 }
1187 
1188 static void
1189 nvme_ctrlr_get_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
1190 {
1191 	uint32_t cq_allocated, sq_allocated, min_allocated, i;
1192 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1193 
1194 	if (spdk_nvme_cpl_is_error(cpl)) {
1195 		SPDK_ERRLOG("Get Features - Number of Queues failed!\n");
1196 		ctrlr->opts.num_io_queues = 0;
1197 	} else {
1198 		/*
1199 		 * Data in cdw0 is 0-based.
1200 		 * Lower 16 bits indicate the number of submission queues allocated.
1201 		 * Upper 16 bits indicate the number of completion queues allocated.
1202 		 */
1203 		sq_allocated = (cpl->cdw0 & 0xFFFF) + 1;
1204 		cq_allocated = (cpl->cdw0 >> 16) + 1;
1205 
1206 		/*
1207 		 * For 1:1 queue mapping, set number of allocated queues to be minimum of
1208 		 * submission and completion queues.
1209 		 */
1210 		min_allocated = spdk_min(sq_allocated, cq_allocated);
1211 
1212 		/* Set number of queues to be minimum of requested and actually allocated. */
1213 		ctrlr->opts.num_io_queues = spdk_min(min_allocated, ctrlr->opts.num_io_queues);
1214 	}
1215 
1216 	ctrlr->free_io_qids = spdk_bit_array_create(ctrlr->opts.num_io_queues + 1);
1217 	if (ctrlr->free_io_qids == NULL) {
1218 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1219 		return;
1220 	}
1221 
1222 	/* Initialize list of free I/O queue IDs. QID 0 is the admin queue. */
1223 	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
1224 	for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
1225 		spdk_bit_array_set(ctrlr->free_io_qids, i);
1226 	}
1227 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONSTRUCT_NS,
1228 			     ctrlr->opts.admin_timeout_ms);
1229 }
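
/*
 * Worked example of the cdw0 decoding above (hypothetical completion value):
 *
 *     cdw0 = 0x00030007
 *       sq_allocated  = (0x0007 & 0xFFFF) + 1 = 8
 *       cq_allocated  =  (cdw0 >> 16)     + 1 = 4
 *       min_allocated = spdk_min(8, 4)        = 4
 *
 * If the caller had requested num_io_queues = 16, it is trimmed to 4 here.
 */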
1230 
1231 static int
1232 nvme_ctrlr_get_num_queues(struct spdk_nvme_ctrlr *ctrlr)
1233 {
1234 	int rc;
1235 
1236 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES,
1237 			     ctrlr->opts.admin_timeout_ms);
1238 
1239 	/* Obtain the number of queues allocated using Get Features. */
1240 	rc = nvme_ctrlr_cmd_get_num_queues(ctrlr, nvme_ctrlr_get_num_queues_done, ctrlr);
1241 	if (rc != 0) {
1242 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1243 		return rc;
1244 	}
1245 
1246 	return 0;
1247 }
1248 
1249 static void
1250 nvme_ctrlr_set_keep_alive_timeout_done(void *arg, const struct spdk_nvme_cpl *cpl)
1251 {
1252 	uint32_t keep_alive_interval_ms;
1253 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1254 
1255 	if (spdk_nvme_cpl_is_error(cpl)) {
1256 		SPDK_ERRLOG("Keep alive timeout Get Feature failed: SC %x SCT %x\n",
1257 			    cpl->status.sc, cpl->status.sct);
1258 		ctrlr->opts.keep_alive_timeout_ms = 0;
1259 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1260 		return;
1261 	}
1262 
1263 	if (ctrlr->opts.keep_alive_timeout_ms != cpl->cdw0) {
1264 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Controller adjusted keep alive timeout to %u ms\n",
1265 			      cpl->cdw0);
1266 	}
1267 
1268 	ctrlr->opts.keep_alive_timeout_ms = cpl->cdw0;
1269 
1270 	keep_alive_interval_ms = ctrlr->opts.keep_alive_timeout_ms / 2;
1271 	if (keep_alive_interval_ms == 0) {
1272 		keep_alive_interval_ms = 1;
1273 	}
1274 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Sending keep alive every %u ms\n", keep_alive_interval_ms);
1275 
1276 	ctrlr->keep_alive_interval_ticks = (keep_alive_interval_ms * spdk_get_ticks_hz()) / UINT64_C(1000);
1277 
1278 	/* Schedule the first Keep Alive to be sent as soon as possible. */
1279 	ctrlr->next_keep_alive_tick = spdk_get_ticks();
1280 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
1281 			     ctrlr->opts.admin_timeout_ms);
1282 }
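
/*
 * Worked example of the interval math above (hypothetical values): a reported
 * keep alive timeout of 10000 ms results in a Keep Alive being sent every
 * 5000 ms; with spdk_get_ticks_hz() == 2,000,000,000 that is
 * keep_alive_interval_ticks = (5000 * 2,000,000,000) / 1000 = 10,000,000,000.
 */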
1283 
1284 static int
1285 nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
1286 {
1287 	int rc;
1288 
1289 	if (ctrlr->opts.keep_alive_timeout_ms == 0) {
1290 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
1291 				     ctrlr->opts.admin_timeout_ms);
1292 		return 0;
1293 	}
1294 
1295 	if (ctrlr->cdata.kas == 0) {
1296 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Controller KAS is 0 - not enabling Keep Alive\n");
1297 		ctrlr->opts.keep_alive_timeout_ms = 0;
1298 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
1299 				     ctrlr->opts.admin_timeout_ms);
1300 		return 0;
1301 	}
1302 
1303 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
1304 			     ctrlr->opts.admin_timeout_ms);
1305 
1306 	/* Retrieve actual keep alive timeout, since the controller may have adjusted it. */
1307 	rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0,
1308 					     nvme_ctrlr_set_keep_alive_timeout_done, ctrlr);
1309 	if (rc != 0) {
1310 		SPDK_ERRLOG("Keep alive timeout Get Feature failed: %d\n", rc);
1311 		ctrlr->opts.keep_alive_timeout_ms = 0;
1312 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1313 		return rc;
1314 	}
1315 
1316 	return 0;
1317 }
1318 
1319 static void
1320 nvme_ctrlr_set_host_id_done(void *arg, const struct spdk_nvme_cpl *cpl)
1321 {
1322 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1323 
1324 	if (spdk_nvme_cpl_is_error(cpl)) {
1325 		/*
1326 		 * Treat Set Features - Host ID failure as non-fatal, since the Host ID feature
1327 		 * is optional.
1328 		 */
1329 		SPDK_WARNLOG("Set Features - Host ID failed: SC 0x%x SCT 0x%x\n",
1330 			     cpl->status.sc, cpl->status.sct);
1331 	} else {
1332 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Set Features - Host ID was successful\n");
1333 	}
1334 
1335 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
1336 }
1337 
1338 static int
1339 nvme_ctrlr_set_host_id(struct spdk_nvme_ctrlr *ctrlr)
1340 {
1341 	uint8_t *host_id;
1342 	uint32_t host_id_size;
1343 	int rc;
1344 
1345 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
1346 		/*
1347 		 * NVMe-oF sends the host ID during Connect and doesn't allow
1348 		 * Set Features - Host Identifier after Connect, so we don't need to do anything here.
1349 		 */
1350 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "NVMe-oF transport - not sending Set Features - Host ID\n");
1351 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
1352 		return 0;
1353 	}
1354 
1355 	if (ctrlr->cdata.ctratt.host_id_exhid_supported) {
1356 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Using 128-bit extended host identifier\n");
1357 		host_id = ctrlr->opts.extended_host_id;
1358 		host_id_size = sizeof(ctrlr->opts.extended_host_id);
1359 	} else {
1360 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Using 64-bit host identifier\n");
1361 		host_id = ctrlr->opts.host_id;
1362 		host_id_size = sizeof(ctrlr->opts.host_id);
1363 	}
1364 
1365 	/* If the user specified an all-zeroes host identifier, don't send the command. */
1366 	if (spdk_mem_all_zero(host_id, host_id_size)) {
1367 		SPDK_DEBUGLOG(SPDK_LOG_NVME,
1368 			      "User did not specify host ID - not sending Set Features - Host ID\n");
1369 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
1370 		return 0;
1371 	}
1372 
1373 	SPDK_LOGDUMP(SPDK_LOG_NVME, "host_id", host_id, host_id_size);
1374 
1375 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
1376 			     ctrlr->opts.admin_timeout_ms);
1377 
1378 	rc = nvme_ctrlr_cmd_set_host_id(ctrlr, host_id, host_id_size, nvme_ctrlr_set_host_id_done, ctrlr);
1379 	if (rc != 0) {
1380 		SPDK_ERRLOG("Set Features - Host ID failed: %d\n", rc);
1381 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1382 		return rc;
1383 	}
1384 
1385 	return 0;
1386 }
1387 
1388 static void
1389 nvme_ctrlr_destruct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1390 {
1391 	if (ctrlr->ns) {
1392 		uint32_t i, num_ns = ctrlr->num_ns;
1393 
1394 		for (i = 0; i < num_ns; i++) {
1395 			nvme_ns_destruct(&ctrlr->ns[i]);
1396 		}
1397 
1398 		spdk_free(ctrlr->ns);
1399 		ctrlr->ns = NULL;
1400 		ctrlr->num_ns = 0;
1401 	}
1402 
1403 	if (ctrlr->nsdata) {
1404 		spdk_free(ctrlr->nsdata);
1405 		ctrlr->nsdata = NULL;
1406 	}
1407 
1408 	spdk_dma_free(ctrlr->active_ns_list);
1409 	ctrlr->active_ns_list = NULL;
1410 }
1411 
1412 static void
1413 nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1414 {
1415 	uint32_t i, nn = ctrlr->cdata.nn;
1416 	struct spdk_nvme_ns_data *nsdata;
1417 
1418 	for (i = 0; i < nn; i++) {
1419 		struct spdk_nvme_ns	*ns = &ctrlr->ns[i];
1420 		uint32_t		nsid = i + 1;
1421 		nsdata			= &ctrlr->nsdata[nsid - 1];
1422 
1423 		if ((nsdata->ncap == 0) && spdk_nvme_ctrlr_is_active_ns(ctrlr, nsid)) {
1424 			if (nvme_ns_construct(ns, nsid, ctrlr) != 0) {
1425 				continue;
1426 			}
1427 		}
1428 
1429 		if (nsdata->ncap && !spdk_nvme_ctrlr_is_active_ns(ctrlr, nsid)) {
1430 			nvme_ns_destruct(ns);
1431 		}
1432 	}
1433 }
1434 
1435 static int
1436 nvme_ctrlr_construct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1437 {
1438 	int rc = 0;
1439 	uint32_t nn = ctrlr->cdata.nn;
1440 	uint64_t phys_addr = 0;
1441 
1442 	/* ctrlr->num_ns may be 0 (startup) or a different number of namespaces (reset),
1443 	 * so check if we need to reallocate.
1444 	 */
1445 	if (nn != ctrlr->num_ns) {
1446 		nvme_ctrlr_destruct_namespaces(ctrlr);
1447 
1448 		if (nn == 0) {
1449 			SPDK_WARNLOG("controller has 0 namespaces\n");
1450 			return 0;
1451 		}
1452 
1453 		ctrlr->ns = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns), 64,
1454 					 &phys_addr, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
1455 		if (ctrlr->ns == NULL) {
1456 			rc = -ENOMEM;
1457 			goto fail;
1458 		}
1459 
1460 		ctrlr->nsdata = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns_data), 64,
1461 					     &phys_addr, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
1462 		if (ctrlr->nsdata == NULL) {
1463 			rc = -ENOMEM;
1464 			goto fail;
1465 		}
1466 
1467 		ctrlr->num_ns = nn;
1468 	}
1469 
1470 	return 0;
1471 
1472 fail:
1473 	nvme_ctrlr_destruct_namespaces(ctrlr);
1474 	return rc;
1475 }
1476 
1477 static void
1478 nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl)
1479 {
1480 	struct nvme_async_event_request	*aer = arg;
1481 	struct spdk_nvme_ctrlr		*ctrlr = aer->ctrlr;
1482 	struct spdk_nvme_ctrlr_process	*active_proc;
1483 	union spdk_nvme_async_event_completion	event;
1484 	int					rc;
1485 
1486 	if (cpl->status.sct == SPDK_NVME_SCT_GENERIC &&
1487 	    cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) {
1488 		/*
1489 		 *  This is simulated when the controller is being shut down, to
1490 		 *  effectively abort outstanding asynchronous event requests
1491 		 *  and make sure all memory is freed.  Do not repost the
1492 		 *  request in this case.
1493 		 */
1494 		return;
1495 	}
1496 
1497 	if (cpl->status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
1498 	    cpl->status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED) {
1499 		/*
1500 		 *  SPDK will only send as many AERs as the device says it supports,
1501 		 *  so this status code indicates an out-of-spec device.  Do not repost
1502 		 *  the request in this case.
1503 		 */
1504 		SPDK_ERRLOG("Controller appears out-of-spec for asynchronous event request\n"
1505 			    "handling.  Do not repost this AER.\n");
1506 		return;
1507 	}
1508 
1509 	event.raw = cpl->cdw0;
1510 	if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
1511 	    (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED)) {
1512 		rc = nvme_ctrlr_identify_active_ns(ctrlr);
1513 		if (rc) {
1514 			return;
1515 		}
1516 		nvme_ctrlr_update_namespaces(ctrlr);
1517 	}
1518 
1519 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
1520 	if (active_proc && active_proc->aer_cb_fn) {
1521 		active_proc->aer_cb_fn(active_proc->aer_cb_arg, cpl);
1522 	}
1523 
1524 	/* If the ctrlr is already shutdown, we should not send aer again */
1525 	if (ctrlr->is_shutdown) {
1526 		return;
1527 	}
1528 
1529 	/*
1530 	 * Repost another asynchronous event request to replace the one
1531 	 *  that just completed.
1532 	 */
1533 	if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) {
1534 		/*
1535 		 * We can't do anything to recover from a failure here,
1536 		 * so just print a warning message and leave the AER unsubmitted.
1537 		 */
1538 		SPDK_ERRLOG("resubmitting AER failed!\n");
1539 	}
1540 }
1541 
1542 static int
1543 nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
1544 				    struct nvme_async_event_request *aer)
1545 {
1546 	struct nvme_request *req;
1547 
1548 	aer->ctrlr = ctrlr;
1549 	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_async_event_cb, aer);
1550 	aer->req = req;
1551 	if (req == NULL) {
1552 		return -1;
1553 	}
1554 
1555 	req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1556 	return nvme_ctrlr_submit_admin_request(ctrlr, req);
1557 }
1558 
1559 static void
1560 nvme_ctrlr_configure_aer_done(void *arg, const struct spdk_nvme_cpl *cpl)
1561 {
1562 	struct nvme_async_event_request		*aer;
1563 	int					rc;
1564 	uint32_t				i;
1565 	struct spdk_nvme_ctrlr *ctrlr =	(struct spdk_nvme_ctrlr *)arg;
1566 
1567 	if (spdk_nvme_cpl_is_error(cpl)) {
1568 		SPDK_NOTICELOG("nvme_ctrlr_configure_aer failed!\n");
1569 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
1570 				     ctrlr->opts.admin_timeout_ms);
1571 		return;
1572 	}
1573 
1574 	/* aerl is a zero-based value, so we need to add 1 here. */
1575 	ctrlr->num_aers = spdk_min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));
1576 
1577 	for (i = 0; i < ctrlr->num_aers; i++) {
1578 		aer = &ctrlr->aer[i];
1579 		rc = nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
1580 		if (rc) {
1581 			SPDK_ERRLOG("nvme_ctrlr_construct_and_submit_aer failed!\n");
1582 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1583 			return;
1584 		}
1585 	}
1586 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
1587 			     ctrlr->opts.admin_timeout_ms);
1588 }
1589 
1590 static int
1591 nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
1592 {
1593 	union spdk_nvme_feat_async_event_configuration	config;
1594 	int						rc;
1595 
1596 	config.raw = 0;
1597 	config.bits.crit_warn.bits.available_spare = 1;
1598 	config.bits.crit_warn.bits.temperature = 1;
1599 	config.bits.crit_warn.bits.device_reliability = 1;
1600 	config.bits.crit_warn.bits.read_only = 1;
1601 	config.bits.crit_warn.bits.volatile_memory_backup = 1;
1602 
1603 	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 2, 0)) {
1604 		if (ctrlr->cdata.oaes.ns_attribute_notices) {
1605 			config.bits.ns_attr_notice = 1;
1606 		}
1607 		if (ctrlr->cdata.oaes.fw_activation_notices) {
1608 			config.bits.fw_activation_notice = 1;
1609 		}
1610 	}
1611 	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 3, 0) && ctrlr->cdata.lpa.telemetry) {
1612 		config.bits.telemetry_log_notice = 1;
1613 	}
1614 
1615 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
1616 			     ctrlr->opts.admin_timeout_ms);
1617 
1618 	rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, config,
1619 			nvme_ctrlr_configure_aer_done,
1620 			ctrlr);
1621 	if (rc != 0) {
1622 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1623 		return rc;
1624 	}
1625 
1626 	return 0;
1627 }
1628 
1629 struct spdk_nvme_ctrlr_process *
1630 spdk_nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr, pid_t pid)
1631 {
1632 	struct spdk_nvme_ctrlr_process	*active_proc;
1633 
1634 	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
1635 		if (active_proc->pid == pid) {
1636 			return active_proc;
1637 		}
1638 	}
1639 
1640 	return NULL;
1641 }
1642 
1643 struct spdk_nvme_ctrlr_process *
1644 spdk_nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr)
1645 {
1646 	return spdk_nvme_ctrlr_get_process(ctrlr, getpid());
1647 }
1648 
1649 /**
1650  * This function will be called when a process is using the controller.
1651  *  1. For the primary process, it is called when constructing the controller.
1652  *  2. For the secondary process, it is called when probing the controller.
1653  * Note: it checks whether the process has already been added before adding it again.
1654  */
1655 int
1656 nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
1657 {
1658 	struct spdk_nvme_ctrlr_process	*ctrlr_proc;
1659 	pid_t				pid = getpid();
1660 
1661 	/* Check whether the process is already added or not */
1662 	if (spdk_nvme_ctrlr_get_process(ctrlr, pid)) {
1663 		return 0;
1664 	}
1665 
1666 	/* Initialize the per process properties for this ctrlr */
1667 	ctrlr_proc = spdk_zmalloc(sizeof(struct spdk_nvme_ctrlr_process),
1668 				  64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
1669 	if (ctrlr_proc == NULL) {
1670 		SPDK_ERRLOG("failed to allocate memory to track the process props\n");
1671 
1672 		return -1;
1673 	}
1674 
1675 	ctrlr_proc->is_primary = spdk_process_is_primary();
1676 	ctrlr_proc->pid = pid;
1677 	STAILQ_INIT(&ctrlr_proc->active_reqs);
1678 	ctrlr_proc->devhandle = devhandle;
1679 	ctrlr_proc->ref = 0;
1680 	TAILQ_INIT(&ctrlr_proc->allocated_io_qpairs);
1681 
1682 	TAILQ_INSERT_TAIL(&ctrlr->active_procs, ctrlr_proc, tailq);
1683 
1684 	return 0;
1685 }
1686 
1687 /**
1688  * This function will be called when the process detaches the controller.
1689  * Note: the ctrlr_lock must be held when calling this function.
1690  */
1691 static void
1692 nvme_ctrlr_remove_process(struct spdk_nvme_ctrlr *ctrlr,
1693 			  struct spdk_nvme_ctrlr_process *proc)
1694 {
1695 	struct spdk_nvme_qpair	*qpair, *tmp_qpair;
1696 
1697 	assert(STAILQ_EMPTY(&proc->active_reqs));
1698 
1699 	TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
1700 		spdk_nvme_ctrlr_free_io_qpair(qpair);
1701 	}
1702 
1703 	TAILQ_REMOVE(&ctrlr->active_procs, proc, tailq);
1704 
1705 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
1706 		spdk_pci_device_detach(proc->devhandle);
1707 	}
1708 
1709 	spdk_dma_free(proc);
1710 }
1711 
1712 /**
1713  * This function will be called when a process has exited unexpectedly,
1714  *  in order to free any incomplete nvme requests, allocated I/O qpairs,
1715  *  and other allocated memory.
1716  * Note: the ctrlr_lock must be held when calling this function.
1717  */
1718 static void
1719 nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc)
1720 {
1721 	struct nvme_request	*req, *tmp_req;
1722 	struct spdk_nvme_qpair	*qpair, *tmp_qpair;
1723 
1724 	STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) {
1725 		STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq);
1726 
1727 		assert(req->pid == proc->pid);
1728 
1729 		nvme_free_request(req);
1730 	}
1731 
1732 	TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
1733 		TAILQ_REMOVE(&proc->allocated_io_qpairs, qpair, per_process_tailq);
1734 
1735 		/*
1736 		 * The process may have been killed while some qpairs were in their
1737 		 *  completion context.  Clear that flag here to allow these IO
1738 		 *  qpairs to be deleted.
1739 		 */
1740 		qpair->in_completion_context = 0;
1741 
1742 		qpair->no_deletion_notification_needed = 1;
1743 
1744 		spdk_nvme_ctrlr_free_io_qpair(qpair);
1745 	}
1746 
1747 	spdk_dma_free(proc);
1748 }
1749 
1750 /**
1751  * This function will be called when the controller is being destructed.
1752  *  1. There are no more outstanding admin requests on this controller.
1753  *  2. Clean up any resources left allocated by processes that are gone.
1754  */
1755 void
1756 nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
1757 {
1758 	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;
1759 
1760 	/* Free all the processes' properties and make sure there are no pending admin I/Os */
1761 	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
1762 		TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
1763 
1764 		assert(STAILQ_EMPTY(&active_proc->active_reqs));
1765 
1766 		spdk_free(active_proc);
1767 	}
1768 }
1769 
1770 /**
1771  * This function will be called when any other process attaches to or
1772  *  detaches from the controller, in order to clean up processes that
1773  *  terminated unexpectedly.
1774  * Note: the ctrlr_lock must be held when calling this function.
1775  */
1776 static int
1777 nvme_ctrlr_remove_inactive_proc(struct spdk_nvme_ctrlr *ctrlr)
1778 {
1779 	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;
1780 	int				active_proc_count = 0;
1781 
1782 	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
1783 		if ((kill(active_proc->pid, 0) == -1) && (errno == ESRCH)) {
1784 			SPDK_ERRLOG("process %d terminated unexpectedly\n", active_proc->pid);
1785 
1786 			TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
1787 
1788 			nvme_ctrlr_cleanup_process(active_proc);
1789 		} else {
1790 			active_proc_count++;
1791 		}
1792 	}
1793 
1794 	return active_proc_count;
1795 }
1796 
1797 void
1798 nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
1799 {
1800 	struct spdk_nvme_ctrlr_process	*active_proc;
1801 
1802 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1803 
1804 	nvme_ctrlr_remove_inactive_proc(ctrlr);
1805 
1806 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
1807 	if (active_proc) {
1808 		active_proc->ref++;
1809 	}
1810 
1811 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1812 }
1813 
1814 void
1815 nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
1816 {
1817 	struct spdk_nvme_ctrlr_process	*active_proc;
1818 	int				proc_count;
1819 
1820 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1821 
1822 	proc_count = nvme_ctrlr_remove_inactive_proc(ctrlr);
1823 
1824 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
1825 	if (active_proc) {
1826 		active_proc->ref--;
1827 		assert(active_proc->ref >= 0);
1828 
1829 		/*
1830 		 * The last active process will be removed at the end of
1831 		 * the destruction of the controller.
1832 		 */
1833 		if (active_proc->ref == 0 && proc_count != 1) {
1834 			nvme_ctrlr_remove_process(ctrlr, active_proc);
1835 		}
1836 	}
1837 
1838 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1839 }
1840 
1841 int
1842 nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
1843 {
1844 	struct spdk_nvme_ctrlr_process	*active_proc;
1845 	int				ref = 0;
1846 
1847 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1848 
1849 	nvme_ctrlr_remove_inactive_proc(ctrlr);
1850 
1851 	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
1852 		ref += active_proc->ref;
1853 	}
1854 
1855 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1856 
1857 	return ref;
1858 }
1859 
1860 /**
1861  *  Get the PCI device handle which is only visible to its associated process.
1862  */
1863 struct spdk_pci_device *
1864 nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr)
1865 {
1866 	struct spdk_nvme_ctrlr_process	*active_proc;
1867 	struct spdk_pci_device		*devhandle = NULL;
1868 
1869 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1870 
1871 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
1872 	if (active_proc) {
1873 		devhandle = active_proc->devhandle;
1874 	}
1875 
1876 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1877 
1878 	return devhandle;
1879 }
1880 
1881 static void
1882 nvme_ctrlr_enable_admin_queue(struct spdk_nvme_ctrlr *ctrlr)
1883 {
1884 	nvme_transport_qpair_reset(ctrlr->adminq);
1885 	nvme_qpair_enable(ctrlr->adminq);
1886 }
1887 
1888 /**
1889  * This function will be called repeatedly during initialization until the controller is ready.
1890  */
1891 int
1892 nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
1893 {
1894 	union spdk_nvme_cc_register cc;
1895 	union spdk_nvme_csts_register csts;
1896 	uint32_t ready_timeout_in_ms;
1897 	int rc = 0;
1898 
1899 	/*
1900 	 * We may need to avoid accessing any register on the target controller
1901 	 * for a while; in that case, return early without touching the FSM.
1902 	 * The sleep_timeout_tsc > 0 check exists for the sake of the unit tests.
1903 	 */
1904 	if ((ctrlr->sleep_timeout_tsc > 0) &&
1905 	    (spdk_get_ticks() <= ctrlr->sleep_timeout_tsc)) {
1906 		return 0;
1907 	}
1908 	ctrlr->sleep_timeout_tsc = 0;
1909 
1910 	if (nvme_ctrlr_get_cc(ctrlr, &cc) ||
1911 	    nvme_ctrlr_get_csts(ctrlr, &csts)) {
1912 		if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
1913 			/* While a device is resetting, it may be unable to service MMIO reads
1914 			 * temporarily. Allow for this case.
1915 			 */
1916 			SPDK_ERRLOG("Get registers failed while waiting for CSTS.RDY == 0\n");
1917 			goto init_timeout;
1918 		}
1919 		SPDK_ERRLOG("Failed to read CC and CSTS in state %d\n", ctrlr->state);
1920 		nvme_ctrlr_fail(ctrlr, false);
1921 		return -EIO;
1922 	}
1923 
1924 	ready_timeout_in_ms = 500 * ctrlr->cap.bits.to;
1925 
1926 	/*
1927 	 * Check if the current initialization step is done or has timed out.
1928 	 */
1929 	switch (ctrlr->state) {
1930 	case NVME_CTRLR_STATE_INIT_DELAY:
1931 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, ready_timeout_in_ms);
1932 		/*
1933 		 * Controller may need some delay before it's enabled.
1934 		 *
1935 		 * This is a workaround for an issue where the PCIe-attached NVMe controller
1936 		 * is not ready after VFIO reset. We delay the initialization rather than the
1937 		 * enabling itself, because this is required only for the very first enabling
1938 		 * - directly after a VFIO reset.
1939 		 *
1940 		 * TODO: Figure out what is actually going wrong.
1941 		 */
1942 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Adding 2 second delay before initializing the controller\n");
1943 		ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2000 * spdk_get_ticks_hz() / 1000);
1944 		break;
1945 
1946 	case NVME_CTRLR_STATE_INIT:
1947 		/* Begin the hardware initialization by making sure the controller is disabled. */
1948 		if (cc.bits.en) {
1949 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1\n");
1950 			/*
1951 			 * Controller is currently enabled. We need to disable it to cause a reset.
1952 			 *
1953 			 * If CC.EN = 1 && CSTS.RDY = 0, the controller is in the process of becoming ready.
1954 			 *  Wait for the ready bit to be 1 before disabling the controller.
1955 			 */
1956 			if (csts.bits.rdy == 0) {
1957 				SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 0 - waiting for reset to complete\n");
1958 				nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1, ready_timeout_in_ms);
1959 				return 0;
1960 			}
1961 
1962 			/* CC.EN = 1 && CSTS.RDY == 1, so we can immediately disable the controller. */
1963 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 0\n");
1964 			cc.bits.en = 0;
1965 			if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
1966 				SPDK_ERRLOG("set_cc() failed\n");
1967 				nvme_ctrlr_fail(ctrlr, false);
1968 				return -EIO;
1969 			}
1970 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
1971 
1972 			/*
1973 			 * Wait 2.5 seconds before accessing PCI registers.
1974 			 * Not using sleep() to avoid blocking other controllers' initialization.
1975 			 */
1976 			if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) {
1977 				SPDK_DEBUGLOG(SPDK_LOG_NVME, "Applying quirk: delay 2.5 seconds before reading registers\n");
1978 				ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2500 * spdk_get_ticks_hz() / 1000);
1979 			}
1980 			return 0;
1981 		} else {
1982 			if (csts.bits.rdy == 1) {
1983 				SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 0 && CSTS.RDY = 1 - waiting for shutdown to complete\n");
1984 			}
1985 
1986 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
1987 			return 0;
1988 		}
1989 		break;
1990 
1991 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
1992 		if (csts.bits.rdy == 1) {
1993 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 1 - disabling controller\n");
1994 			/* CC.EN = 1 && CSTS.RDY = 1, so we can set CC.EN = 0 now. */
1995 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 0\n");
1996 			cc.bits.en = 0;
1997 			if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
1998 				SPDK_ERRLOG("set_cc() failed\n");
1999 				nvme_ctrlr_fail(ctrlr, false);
2000 				return -EIO;
2001 			}
2002 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
2003 			return 0;
2004 		}
2005 		break;
2006 
2007 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
2008 		if (csts.bits.rdy == 0) {
2009 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 0 && CSTS.RDY = 0\n");
2010 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE, ready_timeout_in_ms);
2011 			/*
2012 			 * Delay 100us before setting CC.EN = 1.  Some NVMe SSDs miss the CC.EN write
2013 			 *  if it happens too soon after CSTS.RDY is reported as 0.
2014 			 */
2015 			spdk_delay_us(100);
2016 			return 0;
2017 		}
2018 		break;
2019 
2020 	case NVME_CTRLR_STATE_ENABLE:
2021 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 1\n");
2022 		rc = nvme_ctrlr_enable(ctrlr);
2023 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1, ready_timeout_in_ms);
2024 		return rc;
2025 
2026 	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
2027 		if (csts.bits.rdy == 1) {
2028 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 1 - controller is ready\n");
2029 			/*
2030 			 * The controller has been enabled.
2031 			 *  Perform the rest of initialization serially.
2032 			 */
2033 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE,
2034 					     ctrlr->opts.admin_timeout_ms);
2035 			return 0;
2036 		}
2037 		break;
2038 
2039 	case NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE:
2040 		nvme_ctrlr_enable_admin_queue(ctrlr);
2041 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY,
2042 				     ctrlr->opts.admin_timeout_ms);
2043 		break;
2044 
2045 	case NVME_CTRLR_STATE_IDENTIFY:
2046 		rc = nvme_ctrlr_identify(ctrlr);
2047 		break;
2048 
2049 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
2050 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2051 		break;
2052 
2053 	case NVME_CTRLR_STATE_SET_NUM_QUEUES:
2054 		rc = nvme_ctrlr_set_num_queues(ctrlr);
2055 		break;
2056 
2057 	case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
2058 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2059 		break;
2060 
2061 	case NVME_CTRLR_STATE_GET_NUM_QUEUES:
2062 		rc = nvme_ctrlr_get_num_queues(ctrlr);
2063 		break;
2064 
2065 	case NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES:
2066 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2067 		break;
2068 
2069 	case NVME_CTRLR_STATE_CONSTRUCT_NS:
2070 		rc = nvme_ctrlr_construct_namespaces(ctrlr);
2071 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
2072 				     ctrlr->opts.admin_timeout_ms);
2073 		break;
2074 
2075 	case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
2076 		rc = nvme_ctrlr_identify_active_ns(ctrlr);
2077 		if (rc < 0) {
2078 			nvme_ctrlr_destruct_namespaces(ctrlr);
2079 		}
2080 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS,
2081 				     ctrlr->opts.admin_timeout_ms);
2082 		break;
2083 
2084 	case NVME_CTRLR_STATE_IDENTIFY_NS:
2085 		rc = nvme_ctrlr_identify_namespaces(ctrlr);
2086 		break;
2087 
2088 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
2089 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2090 		break;
2091 
2092 	case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
2093 		rc = nvme_ctrlr_identify_id_desc_namespaces(ctrlr);
2094 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
2095 				     ctrlr->opts.admin_timeout_ms);
2096 		break;
2097 
2098 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
2099 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2100 		break;
2101 
2102 	case NVME_CTRLR_STATE_CONFIGURE_AER:
2103 		rc = nvme_ctrlr_configure_aer(ctrlr);
2104 		break;
2105 
2106 	case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
2107 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2108 		break;
2109 
2110 	case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
2111 		rc = nvme_ctrlr_set_supported_log_pages(ctrlr);
2112 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
2113 				     ctrlr->opts.admin_timeout_ms);
2114 		break;
2115 
2116 	case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
2117 		nvme_ctrlr_set_supported_features(ctrlr);
2118 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
2119 				     ctrlr->opts.admin_timeout_ms);
2120 		break;
2121 
2122 	case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
2123 		rc = nvme_ctrlr_set_doorbell_buffer_config(ctrlr);
2124 		break;
2125 
2126 	case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
2127 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2128 		break;
2129 
2130 	case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
2131 		rc = nvme_ctrlr_set_keep_alive_timeout(ctrlr);
2132 		break;
2133 
2134 	case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
2135 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2136 		break;
2137 
2138 	case NVME_CTRLR_STATE_SET_HOST_ID:
2139 		rc = nvme_ctrlr_set_host_id(ctrlr);
2140 		break;
2141 
2142 	case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
2143 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2144 		break;
2145 
2146 	case NVME_CTRLR_STATE_READY:
2147 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Ctrlr already in ready state\n");
2148 		return 0;
2149 
2150 	case NVME_CTRLR_STATE_ERROR:
2151 		SPDK_ERRLOG("Ctrlr %s is in error state\n", ctrlr->trid.traddr);
2152 		return -1;
2153 
2154 	default:
2155 		assert(0);
2156 		nvme_ctrlr_fail(ctrlr, false);
2157 		return -1;
2158 	}
2159 
2160 init_timeout:
2161 	if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE &&
2162 	    spdk_get_ticks() > ctrlr->state_timeout_tsc) {
2163 		SPDK_ERRLOG("Initialization timed out in state %d\n", ctrlr->state);
2164 		nvme_ctrlr_fail(ctrlr, false);
2165 		return -1;
2166 	}
2167 
2168 	return rc;
2169 }
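
/*
 * Illustrative sketch only (not part of this file): the probe path drives the
 * state machine above by calling nvme_ctrlr_process_init() repeatedly until
 * the controller either reaches the ready state or fails.  The helper name
 * below is hypothetical.
 */
static int
example_drive_ctrlr_init(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc = 0;

	/* Each call performs at most one initialization step. */
	while (ctrlr->state != NVME_CTRLR_STATE_READY) {
		rc = nvme_ctrlr_process_init(ctrlr);
		if (rc != 0) {
			SPDK_ERRLOG("controller initialization failed\n");
			break;
		}
	}

	return rc;
}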
2170 
2171 int
2172 nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx)
2173 {
2174 	pthread_mutexattr_t attr;
2175 	int rc = 0;
2176 
2177 	if (pthread_mutexattr_init(&attr)) {
2178 		return -1;
2179 	}
2180 	if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) ||
2181 #ifndef __FreeBSD__
2182 	    pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST) ||
2183 	    pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
2184 #endif
2185 	    pthread_mutex_init(mtx, &attr)) {
2186 		rc = -1;
2187 	}
2188 	pthread_mutexattr_destroy(&attr);
2189 	return rc;
2190 }
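
/*
 * Illustrative sketch only: a robust mutex reports EOWNERDEAD if its previous
 * owner died while holding it.  A lock wrapper (such as the
 * nvme_robust_mutex_lock() helper used throughout this file) is expected to
 * recover roughly as sketched below; this is an assumption about the pattern,
 * not a copy of the in-tree helper.
 */
static int
example_robust_lock(pthread_mutex_t *mtx)
{
	int rc = pthread_mutex_lock(mtx);

	if (rc == EOWNERDEAD) {
		/* The previous owner died; mark the mutex consistent and carry on. */
		rc = pthread_mutex_consistent(mtx);
	}

	return rc;
}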
2191 
2192 int
2193 nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
2194 {
2195 	int rc;
2196 
2197 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
2198 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT_DELAY, NVME_TIMEOUT_INFINITE);
2199 	} else {
2200 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
2201 	}
2202 
2203 	ctrlr->flags = 0;
2204 	ctrlr->free_io_qids = NULL;
2205 	ctrlr->is_resetting = false;
2206 	ctrlr->is_failed = false;
2207 	ctrlr->is_shutdown = false;
2208 
2209 	TAILQ_INIT(&ctrlr->active_io_qpairs);
2210 	STAILQ_INIT(&ctrlr->queued_aborts);
2211 	ctrlr->outstanding_aborts = 0;
2212 
2213 	rc = nvme_robust_mutex_init_recursive_shared(&ctrlr->ctrlr_lock);
2214 	if (rc != 0) {
2215 		return rc;
2216 	}
2217 
2218 	TAILQ_INIT(&ctrlr->active_procs);
2219 
2220 	return rc;
2221 }
2222 
2223 /* This function should be called once at ctrlr initialization to set up constant properties. */
2224 void
2225 nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_register *cap,
2226 		    const union spdk_nvme_vs_register *vs)
2227 {
2228 	ctrlr->cap = *cap;
2229 	ctrlr->vs = *vs;
2230 
2231 	ctrlr->min_page_size = 1u << (12 + ctrlr->cap.bits.mpsmin);
2232 
2233 	/* For now, always select page_size == min_page_size. */
2234 	ctrlr->page_size = ctrlr->min_page_size;
2235 
2236 	ctrlr->opts.io_queue_size = spdk_max(ctrlr->opts.io_queue_size, SPDK_NVME_IO_QUEUE_MIN_ENTRIES);
2237 	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, MAX_IO_QUEUE_ENTRIES);
2238 	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
2239 
2240 	ctrlr->opts.io_queue_requests = spdk_max(ctrlr->opts.io_queue_requests, ctrlr->opts.io_queue_size);
2241 }
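
/*
 * Worked example (illustrative): with CAP.MPSMIN = 0 the minimum page size is
 * 1 << (12 + 0) = 4096 bytes, and with CAP.MQES = 1023 (a 0's based value) the
 * requested io_queue_size is clamped to at most 1023 + 1 = 1024 entries, but
 * never below SPDK_NVME_IO_QUEUE_MIN_ENTRIES.
 */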
2242 
2243 void
2244 nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr)
2245 {
2246 	pthread_mutex_destroy(&ctrlr->ctrlr_lock);
2247 }
2248 
2249 void
2250 nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
2251 {
2252 	struct spdk_nvme_qpair *qpair, *tmp;
2253 
2254 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Prepare to destruct SSD: %s\n", ctrlr->trid.traddr);
2255 	TAILQ_FOREACH_SAFE(qpair, &ctrlr->active_io_qpairs, tailq, tmp) {
2256 		spdk_nvme_ctrlr_free_io_qpair(qpair);
2257 	}
2258 
2259 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
2260 
2261 	nvme_ctrlr_shutdown(ctrlr);
2262 
2263 	nvme_ctrlr_destruct_namespaces(ctrlr);
2264 
2265 	spdk_bit_array_free(&ctrlr->free_io_qids);
2266 
2267 	nvme_transport_ctrlr_destruct(ctrlr);
2268 }
2269 
2270 int
2271 nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
2272 				struct nvme_request *req)
2273 {
2274 	return nvme_qpair_submit_request(ctrlr->adminq, req);
2275 }
2276 
2277 static void
2278 nvme_keep_alive_completion(void *cb_ctx, const struct spdk_nvme_cpl *cpl)
2279 {
2280 	/* Do nothing */
2281 }
2282 
2283 /*
2284  * Check if we need to send a Keep Alive command.
2285  * Caller must hold ctrlr->ctrlr_lock.
2286  */
2287 static void
2288 nvme_ctrlr_keep_alive(struct spdk_nvme_ctrlr *ctrlr)
2289 {
2290 	uint64_t now;
2291 	struct nvme_request *req;
2292 	struct spdk_nvme_cmd *cmd;
2293 	int rc;
2294 
2295 	now = spdk_get_ticks();
2296 	if (now < ctrlr->next_keep_alive_tick) {
2297 		return;
2298 	}
2299 
2300 	req = nvme_allocate_request_null(ctrlr->adminq, nvme_keep_alive_completion, NULL);
2301 	if (req == NULL) {
2302 		return;
2303 	}
2304 
2305 	cmd = &req->cmd;
2306 	cmd->opc = SPDK_NVME_OPC_KEEP_ALIVE;
2307 
2308 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
2309 	if (rc != 0) {
2310 		SPDK_ERRLOG("Submitting Keep Alive failed\n");
2311 	}
2312 
2313 	ctrlr->next_keep_alive_tick = now + ctrlr->keep_alive_interval_ticks;
2314 }
2315 
2316 int32_t
2317 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
2318 {
2319 	int32_t num_completions;
2320 
2321 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2322 	if (ctrlr->keep_alive_interval_ticks) {
2323 		nvme_ctrlr_keep_alive(ctrlr);
2324 	}
2325 	num_completions = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2326 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2327 
2328 	return num_completions;
2329 }
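
/*
 * Illustrative sketch only: applications are expected to poll the admin queue
 * periodically so that keep alive commands are sent on time and asynchronous
 * event completions are processed.  The function name below is hypothetical.
 */
static void
example_poll_admin_queue(struct spdk_nvme_ctrlr *ctrlr)
{
	int32_t rc;

	rc = spdk_nvme_ctrlr_process_admin_completions(ctrlr);
	if (rc < 0) {
		SPDK_ERRLOG("admin queue processing failed\n");
	}
}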
2330 
2331 const struct spdk_nvme_ctrlr_data *
2332 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
2333 {
2334 	return &ctrlr->cdata;
2335 }
2336 
2337 union spdk_nvme_csts_register spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
2338 {
2339 	union spdk_nvme_csts_register csts;
2340 
2341 	if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
2342 		csts.raw = 0xFFFFFFFFu;
2343 	}
2344 	return csts;
2345 }
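
/*
 * Illustrative sketch only: callers commonly use CSTS to check for a
 * controller fatal status; the all-ones value is what the function above
 * returns when the register read fails.  The function name below is
 * hypothetical.
 */
static bool
example_ctrlr_is_fatal(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts = spdk_nvme_ctrlr_get_regs_csts(ctrlr);

	return csts.raw != 0xFFFFFFFFu && csts.bits.cfs;
}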
2346 
2347 union spdk_nvme_cap_register spdk_nvme_ctrlr_get_regs_cap(struct spdk_nvme_ctrlr *ctrlr)
2348 {
2349 	return ctrlr->cap;
2350 }
2351 
2352 union spdk_nvme_vs_register spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
2353 {
2354 	return ctrlr->vs;
2355 }
2356 
2357 uint32_t
2358 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
2359 {
2360 	return ctrlr->num_ns;
2361 }
2362 
2363 static int32_t
2364 spdk_nvme_ctrlr_active_ns_idx(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2365 {
2366 	int32_t result = -1;
2367 
2368 	if (ctrlr->active_ns_list == NULL || nsid == 0 || nsid > ctrlr->num_ns) {
2369 		return result;
2370 	}
2371 
2372 	int32_t lower = 0;
2373 	int32_t upper = ctrlr->num_ns - 1;
2374 	int32_t mid;
2375 
2376 	while (lower <= upper) {
2377 		mid = lower + (upper - lower) / 2;
2378 		if (ctrlr->active_ns_list[mid] == nsid) {
2379 			result = mid;
2380 			break;
2381 		} else {
2382 			if (ctrlr->active_ns_list[mid] != 0 && ctrlr->active_ns_list[mid] < nsid) {
2383 				lower = mid + 1;
2384 			} else {
2385 				upper = mid - 1;
2386 			}
2387 
2388 		}
2389 	}
2390 
2391 	return result;
2392 }
2393 
2394 bool
2395 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2396 {
2397 	return spdk_nvme_ctrlr_active_ns_idx(ctrlr, nsid) != -1;
2398 }
2399 
2400 uint32_t
2401 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
2402 {
2403 	return ctrlr->active_ns_list ? ctrlr->active_ns_list[0] : 0;
2404 }
2405 
2406 uint32_t
2407 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
2408 {
2409 	int32_t nsid_idx = spdk_nvme_ctrlr_active_ns_idx(ctrlr, prev_nsid);
2410 	if (ctrlr->active_ns_list && nsid_idx >= 0 && (uint32_t)nsid_idx < ctrlr->num_ns - 1) {
2411 		return ctrlr->active_ns_list[nsid_idx + 1];
2412 	}
2413 	return 0;
2414 }
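
/*
 * Illustrative sketch only: the two functions above are typically used
 * together to iterate over all active namespaces.  The function name below is
 * hypothetical.
 */
static uint32_t
example_count_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid, count = 0;

	for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
	     nsid != 0;
	     nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
		count++;
	}

	return count;
}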
2415 
2416 struct spdk_nvme_ns *
2417 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2418 {
2419 	if (nsid < 1 || nsid > ctrlr->num_ns) {
2420 		return NULL;
2421 	}
2422 
2423 	return &ctrlr->ns[nsid - 1];
2424 }
2425 
2426 struct spdk_pci_device *
2427 spdk_nvme_ctrlr_get_pci_device(struct spdk_nvme_ctrlr *ctrlr)
2428 {
2429 	if (ctrlr == NULL) {
2430 		return NULL;
2431 	}
2432 
2433 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
2434 		return NULL;
2435 	}
2436 
2437 	return nvme_ctrlr_proc_get_devhandle(ctrlr);
2438 }
2439 
2440 uint32_t
2441 spdk_nvme_ctrlr_get_max_xfer_size(const struct spdk_nvme_ctrlr *ctrlr)
2442 {
2443 	return ctrlr->max_xfer_size;
2444 }
2445 
2446 void
2447 spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
2448 				      spdk_nvme_aer_cb aer_cb_fn,
2449 				      void *aer_cb_arg)
2450 {
2451 	struct spdk_nvme_ctrlr_process *active_proc;
2452 
2453 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2454 
2455 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
2456 	if (active_proc) {
2457 		active_proc->aer_cb_fn = aer_cb_fn;
2458 		active_proc->aer_cb_arg = aer_cb_arg;
2459 	}
2460 
2461 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2462 }
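
/*
 * Illustrative sketch only: a typical AER callback registered via
 * spdk_nvme_ctrlr_register_aer_callback() inspects the completion and reacts
 * to the event type.  The callback name below is hypothetical.
 */
static void
example_aer_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	union spdk_nvme_async_event_completion event;

	if (spdk_nvme_cpl_is_error(cpl)) {
		return;
	}

	event.raw = cpl->cdw0;
	if (event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) {
		/* e.g. a namespace attribute change - rescan the namespaces here. */
	}
}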
2463 
2464 void
2465 spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,
2466 		uint64_t timeout_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)
2467 {
2468 	struct spdk_nvme_ctrlr_process	*active_proc;
2469 
2470 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2471 
2472 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
2473 	if (active_proc) {
2474 		active_proc->timeout_ticks = timeout_us * spdk_get_ticks_hz() / 1000000ULL;
2475 		active_proc->timeout_cb_fn = cb_fn;
2476 		active_proc->timeout_cb_arg = cb_arg;
2477 	}
2478 
2479 	ctrlr->timeout_enabled = true;
2480 
2481 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2482 }
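
/*
 * Illustrative sketch only: a timeout callback registered via
 * spdk_nvme_ctrlr_register_timeout_callback() might abort the offending
 * command, or reset the controller when the admin queue itself timed out
 * (qpair == NULL).  The function names below are hypothetical, and resetting
 * on admin timeouts is just one possible policy.
 */
static void
example_abort_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	/* The result of the abort command could be inspected here. */
}

static void
example_timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
		   struct spdk_nvme_qpair *qpair, uint16_t cid)
{
	if (qpair == NULL) {
		spdk_nvme_ctrlr_reset(ctrlr);
		return;
	}

	/* An I/O command timed out; cid identifies it to the abort command. */
	spdk_nvme_ctrlr_cmd_abort(ctrlr, qpair, cid, example_abort_done, NULL);
}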
2483 
2484 bool
2485 spdk_nvme_ctrlr_is_log_page_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page)
2486 {
2487 	/* No bounds check necessary, since log_page is uint8_t and log_page_supported has 256 entries */
2488 	SPDK_STATIC_ASSERT(sizeof(ctrlr->log_page_supported) == 256, "log_page_supported size mismatch");
2489 	return ctrlr->log_page_supported[log_page];
2490 }
2491 
2492 bool
2493 spdk_nvme_ctrlr_is_feature_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature_code)
2494 {
2495 	/* No bounds check necessary, since feature_code is uint8_t and feature_supported has 256 entries */
2496 	SPDK_STATIC_ASSERT(sizeof(ctrlr->feature_supported) == 256, "feature_supported size mismatch");
2497 	return ctrlr->feature_supported[feature_code];
2498 }
2499 
2500 int
2501 spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
2502 			  struct spdk_nvme_ctrlr_list *payload)
2503 {
2504 	struct nvme_completion_poll_status	status;
2505 	int					res;
2506 	struct spdk_nvme_ns			*ns;
2507 
2508 	res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload,
2509 				       nvme_completion_poll_cb, &status);
2510 	if (res) {
2511 		return res;
2512 	}
2513 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2514 		SPDK_ERRLOG("spdk_nvme_ctrlr_attach_ns failed!\n");
2515 		return -ENXIO;
2516 	}
2517 
2518 	res = nvme_ctrlr_identify_active_ns(ctrlr);
2519 	if (res) {
2520 		return res;
2521 	}
2522 
2523 	ns = &ctrlr->ns[nsid - 1];
2524 	return nvme_ns_construct(ns, nsid, ctrlr);
2525 }
2526 
2527 int
2528 spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
2529 			  struct spdk_nvme_ctrlr_list *payload)
2530 {
2531 	struct nvme_completion_poll_status	status;
2532 	int					res;
2533 	struct spdk_nvme_ns			*ns;
2534 
2535 	res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload,
2536 				       nvme_completion_poll_cb, &status);
2537 	if (res) {
2538 		return res;
2539 	}
2540 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2541 		SPDK_ERRLOG("spdk_nvme_ctrlr_detach_ns failed!\n");
2542 		return -ENXIO;
2543 	}
2544 
2545 	res = nvme_ctrlr_identify_active_ns(ctrlr);
2546 	if (res) {
2547 		return res;
2548 	}
2549 
2550 	ns = &ctrlr->ns[nsid - 1];
2551 	/* Inactive NS */
2552 	nvme_ns_destruct(ns);
2553 
2554 	return 0;
2555 }
2556 
2557 uint32_t
2558 spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload)
2559 {
2560 	struct nvme_completion_poll_status	status;
2561 	int					res;
2562 	uint32_t				nsid;
2563 	struct spdk_nvme_ns			*ns;
2564 
2565 	res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, &status);
2566 	if (res) {
2567 		return 0;
2568 	}
2569 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2570 		SPDK_ERRLOG("spdk_nvme_ctrlr_create_ns failed!\n");
2571 		return 0;
2572 	}
2573 
2574 	nsid = status.cpl.cdw0;
2575 	ns = &ctrlr->ns[nsid - 1];
2576 	/* Inactive NS */
2577 	res = nvme_ns_construct(ns, nsid, ctrlr);
2578 	if (res) {
2579 		return 0;
2580 	}
2581 
2582 	/* Return the namespace ID that was created */
2583 	return nsid;
2584 }
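
/*
 * Illustrative sketch only: namespace management typically pairs the create
 * and attach operations so that the new namespace becomes usable on this
 * controller.  The function name below is hypothetical, and attaching via the
 * controller's own CNTLID is an assumption for this example.
 */
static uint32_t
example_create_and_attach_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *ns_data)
{
	struct spdk_nvme_ctrlr_list ctrlr_list;
	uint32_t nsid;

	nsid = spdk_nvme_ctrlr_create_ns(ctrlr, ns_data);
	if (nsid == 0) {
		return 0;
	}

	memset(&ctrlr_list, 0, sizeof(ctrlr_list));
	ctrlr_list.ctrlr_count = 1;
	ctrlr_list.ctrlr_list[0] = ctrlr->cdata.cntlid;

	if (spdk_nvme_ctrlr_attach_ns(ctrlr, nsid, &ctrlr_list) != 0) {
		return 0;
	}

	return nsid;
}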
2585 
2586 int
2587 spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2588 {
2589 	struct nvme_completion_poll_status	status;
2590 	int					res;
2591 	struct spdk_nvme_ns			*ns;
2592 
2593 	res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, &status);
2594 	if (res) {
2595 		return res;
2596 	}
2597 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2598 		SPDK_ERRLOG("spdk_nvme_ctrlr_delete_ns failed!\n");
2599 		return -ENXIO;
2600 	}
2601 
2602 	res = nvme_ctrlr_identify_active_ns(ctrlr);
2603 	if (res) {
2604 		return res;
2605 	}
2606 
2607 	ns = &ctrlr->ns[nsid - 1];
2608 	nvme_ns_destruct(ns);
2609 
2610 	return 0;
2611 }
2612 
2613 int
2614 spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
2615 		       struct spdk_nvme_format *format)
2616 {
2617 	struct nvme_completion_poll_status	status;
2618 	int					res;
2619 
2620 	res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb,
2621 				    &status);
2622 	if (res) {
2623 		return res;
2624 	}
2625 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2626 		SPDK_ERRLOG("spdk_nvme_ctrlr_format failed!\n");
2627 		return -ENXIO;
2628 	}
2629 
2630 	return spdk_nvme_ctrlr_reset(ctrlr);
2631 }
2632 
2633 int
2634 spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, uint32_t size,
2635 				int slot, enum spdk_nvme_fw_commit_action commit_action, struct spdk_nvme_status *completion_status)
2636 {
2637 	struct spdk_nvme_fw_commit		fw_commit;
2638 	struct nvme_completion_poll_status	status;
2639 	int					res;
2640 	unsigned int				size_remaining;
2641 	unsigned int				offset;
2642 	unsigned int				transfer;
2643 	void					*p;
2644 
2645 	if (!completion_status) {
2646 		return -EINVAL;
2647 	}
2648 	memset(completion_status, 0, sizeof(struct spdk_nvme_status));
2649 	if (size % 4) {
2650 		SPDK_ERRLOG("spdk_nvme_ctrlr_update_firmware invalid size!\n");
2651 		return -1;
2652 	}
2653 
2654 	/* Currently only SPDK_NVME_FW_COMMIT_REPLACE_IMG
2655 	 * and SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG are supported.
2656 	 */
2657 	if ((commit_action != SPDK_NVME_FW_COMMIT_REPLACE_IMG) &&
2658 	    (commit_action != SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG)) {
2659 		SPDK_ERRLOG("spdk_nvme_ctrlr_update_firmware invalid command!\n");
2660 		return -1;
2661 	}
2662 
2663 	/* Firmware download */
2664 	size_remaining = size;
2665 	offset = 0;
2666 	p = payload;
2667 
2668 	while (size_remaining > 0) {
2669 		transfer = spdk_min(size_remaining, ctrlr->min_page_size);
2670 
2671 		res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
2672 						       nvme_completion_poll_cb,
2673 						       &status);
2674 		if (res) {
2675 			return res;
2676 		}
2677 
2678 		if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2679 			SPDK_ERRLOG("spdk_nvme_ctrlr_fw_image_download failed!\n");
2680 			return -ENXIO;
2681 		}
2682 		p += transfer;
2683 		offset += transfer;
2684 		size_remaining -= transfer;
2685 	}
2686 
2687 	/* Firmware commit */
2688 	memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
2689 	fw_commit.fs = slot;
2690 	fw_commit.ca = commit_action;
2691 
2692 	res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb,
2693 				       &status);
2694 	if (res) {
2695 		return res;
2696 	}
2697 
2698 	res = spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock);
2699 
2700 	memcpy(completion_status, &status.cpl.status, sizeof(struct spdk_nvme_status));
2701 
2702 	if (res) {
2703 		if (status.cpl.status.sct != SPDK_NVME_SCT_COMMAND_SPECIFIC ||
2704 		    status.cpl.status.sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) {
2705 			if (status.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC  &&
2706 			    status.cpl.status.sc == SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET) {
2707 				SPDK_NOTICELOG("firmware activation requires a conventional reset to be performed!\n");
2708 			} else {
2709 				SPDK_ERRLOG("nvme_ctrlr_cmd_fw_commit failed!\n");
2710 			}
2711 			return -ENXIO;
2712 		}
2713 	}
2714 
2715 	return spdk_nvme_ctrlr_reset(ctrlr);
2716 }
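
/*
 * Illustrative sketch only: a typical firmware update downloads an image the
 * caller has already read into a buffer and activates it.  The names fw_image
 * and fw_image_size, and the use of slot 0 (let the controller pick a slot),
 * are assumptions for this example.
 */
static int
example_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *fw_image, uint32_t fw_image_size)
{
	struct spdk_nvme_status status;
	int rc;

	rc = spdk_nvme_ctrlr_update_firmware(ctrlr, fw_image, fw_image_size, 0,
					     SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG, &status);
	if (rc != 0) {
		SPDK_ERRLOG("firmware update failed: sct %d, sc %d\n", status.sct, status.sc);
	}

	return rc;
}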
2717 
2718 void *
2719 spdk_nvme_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
2720 {
2721 	void *buf;
2722 
2723 	if (size == 0) {
2724 		return NULL;
2725 	}
2726 
2727 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2728 	buf = nvme_transport_ctrlr_alloc_cmb_io_buffer(ctrlr, size);
2729 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2730 
2731 	return buf;
2732 }
2733 
2734 void
2735 spdk_nvme_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
2736 {
2737 	if (buf && size) {
2738 		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2739 		nvme_transport_ctrlr_free_cmb_io_buffer(ctrlr, buf, size);
2740 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2741 	}
2742 }
2743 
2744 bool
2745 spdk_nvme_ctrlr_is_discovery(struct spdk_nvme_ctrlr *ctrlr)
2746 {
2747 	assert(ctrlr);
2748 
2749 	return !strncmp(ctrlr->trid.subnqn, SPDK_NVMF_DISCOVERY_NQN,
2750 			strlen(SPDK_NVMF_DISCOVERY_NQN));
2751 }
2752 
2753 int
2754 spdk_nvme_ctrlr_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
2755 				 uint16_t spsp, uint8_t nssf, void *payload, size_t size)
2756 {
2757 	struct nvme_completion_poll_status	status;
2758 	int					res;
2759 
2760 	res = nvme_ctrlr_cmd_security_receive(ctrlr, secp, spsp, nssf, payload, size,
2761 					      nvme_completion_poll_cb, &status);
2762 	if (res) {
2763 		return res;
2764 	}
2765 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2766 		SPDK_ERRLOG("spdk_nvme_ctrlr_security_receive failed!\n");
2767 		return -ENXIO;
2768 	}
2769 
2770 	return 0;
2771 }
2772 
2773 int
2774 spdk_nvme_ctrlr_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
2775 			      uint16_t spsp, uint8_t nssf, void *payload, size_t size)
2776 {
2777 	struct nvme_completion_poll_status	status;
2778 	int					res;
2779 
2780 	res = nvme_ctrlr_cmd_security_send(ctrlr, secp, spsp, nssf, payload, size, nvme_completion_poll_cb,
2781 					   &status);
2782 	if (res) {
2783 		return res;
2784 	}
2785 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2786 		SPDK_ERRLOG("spdk_nvme_ctrlr_security_send failed!\n");
2787 		return -ENXIO;
2788 	}
2789 
2790 	return 0;
2791 }
2792