xref: /spdk/lib/nvme/nvme_ctrlr.c (revision 1fc4165fe9bf8512483356ad8e6d27f793f2e3db)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "nvme_internal.h"
37 
38 #include "spdk/env.h"
39 #include "spdk/string.h"
40 
41 static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
42 		struct nvme_async_event_request *aer);
43 static int nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns);
44 static int nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns);
45 
46 static int
47 nvme_ctrlr_get_cc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc)
48 {
49 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
50 					      &cc->raw);
51 }
52 
53 static int
54 nvme_ctrlr_get_csts(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts)
55 {
56 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts.raw),
57 					      &csts->raw);
58 }
59 
60 int
61 nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
62 {
63 	return nvme_transport_ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
64 					      &cap->raw);
65 }
66 
67 int
68 nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs)
69 {
70 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, vs.raw),
71 					      &vs->raw);
72 }
73 
74 static int
75 nvme_ctrlr_set_cc(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cc_register *cc)
76 {
77 	return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
78 					      cc->raw);
79 }
80 
81 int
82 nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz)
83 {
84 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
85 					      &cmbsz->raw);
86 }
87 
88 void
89 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
90 {
91 	char host_id_str[SPDK_UUID_STRING_LEN];
92 
93 	assert(opts);
94 
95 	memset(opts, 0, opts_size);
96 
97 #define FIELD_OK(field) \
98 	offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size
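/*
 * Note (added): FIELD_OK(x) is true only when the caller-supplied opts_size covers
 * field x, so fields added to spdk_nvme_ctrlr_opts in newer SPDK versions are simply
 * skipped for callers compiled against an older, smaller structure (ABI compatibility).
 */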
99 
100 	if (FIELD_OK(num_io_queues)) {
101 		opts->num_io_queues = DEFAULT_MAX_IO_QUEUES;
102 	}
103 
104 	if (FIELD_OK(use_cmb_sqs)) {
105 		opts->use_cmb_sqs = true;
106 	}
107 
108 	if (FIELD_OK(arb_mechanism)) {
109 		opts->arb_mechanism = SPDK_NVME_CC_AMS_RR;
110 	}
111 
112 	if (FIELD_OK(keep_alive_timeout_ms)) {
113 		opts->keep_alive_timeout_ms = MIN_KEEP_ALIVE_TIMEOUT_IN_MS;
114 	}
115 
116 	if (FIELD_OK(io_queue_size)) {
117 		opts->io_queue_size = DEFAULT_IO_QUEUE_SIZE;
118 	}
119 
120 	if (FIELD_OK(io_queue_requests)) {
121 		opts->io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
122 	}
123 
124 	if (FIELD_OK(host_id)) {
125 		memset(opts->host_id, 0, sizeof(opts->host_id));
126 	}
127 
128 	if (nvme_driver_init() == 0) {
129 		if (FIELD_OK(extended_host_id)) {
130 			memcpy(opts->extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
131 			       sizeof(opts->extended_host_id));
132 		}
133 
134 		if (FIELD_OK(hostnqn)) {
135 			spdk_uuid_fmt_lower(host_id_str, sizeof(host_id_str),
136 					    &g_spdk_nvme_driver->default_extended_host_id);
137 			snprintf(opts->hostnqn, sizeof(opts->hostnqn), "2014-08.org.nvmexpress:uuid:%s", host_id_str);
138 		}
139 	}
140 
141 	if (FIELD_OK(src_addr)) {
142 		memset(opts->src_addr, 0, sizeof(opts->src_addr));
143 	}
144 
145 	if (FIELD_OK(src_svcid)) {
146 		memset(opts->src_svcid, 0, sizeof(opts->src_svcid));
147 	}
148 
149 	if (FIELD_OK(command_set)) {
150 		opts->command_set = SPDK_NVME_CC_CSS_NVM;
151 	}
152 
153 	if (FIELD_OK(admin_timeout_ms)) {
154 		opts->admin_timeout_ms = NVME_MAX_TIMEOUT_PERIOD * 1000;
155 	}
156 
157 	if (FIELD_OK(header_digest)) {
158 		opts->header_digest = false;
159 	}
160 
161 	if (FIELD_OK(data_digest)) {
162 		opts->data_digest = false;
163 	}
164 #undef FIELD_OK
165 }
166 
167 /**
168  * This function will be called when the process allocates the IO qpair.
169  * Note: the ctrlr_lock must be held when calling this function.
170  */
171 static void
172 nvme_ctrlr_proc_add_io_qpair(struct spdk_nvme_qpair *qpair)
173 {
174 	struct spdk_nvme_ctrlr_process	*active_proc;
175 	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
176 
177 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
178 	if (active_proc) {
179 		TAILQ_INSERT_TAIL(&active_proc->allocated_io_qpairs, qpair, per_process_tailq);
180 		qpair->active_proc = active_proc;
181 	}
182 }
183 
184 /**
185  * This function will be called when the process frees the IO qpair.
186  * Note: the ctrlr_lock must be held when calling this function.
187  */
188 static void
189 nvme_ctrlr_proc_remove_io_qpair(struct spdk_nvme_qpair *qpair)
190 {
191 	struct spdk_nvme_ctrlr_process	*active_proc;
192 	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
193 	struct spdk_nvme_qpair          *active_qpair, *tmp_qpair;
194 
195 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
196 	if (!active_proc) {
197 		return;
198 	}
199 
200 	TAILQ_FOREACH_SAFE(active_qpair, &active_proc->allocated_io_qpairs,
201 			   per_process_tailq, tmp_qpair) {
202 		if (active_qpair == qpair) {
203 			TAILQ_REMOVE(&active_proc->allocated_io_qpairs,
204 				     active_qpair, per_process_tailq);
205 
206 			break;
207 		}
208 	}
209 }
210 
211 void
212 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
213 		struct spdk_nvme_io_qpair_opts *opts,
214 		size_t opts_size)
215 {
216 	assert(ctrlr);
217 
218 	assert(opts);
219 
220 	memset(opts, 0, opts_size);
221 
222 #define FIELD_OK(field) \
223 	offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(opts->field) <= opts_size
224 
225 	if (FIELD_OK(qprio)) {
226 		opts->qprio = SPDK_NVME_QPRIO_URGENT;
227 	}
228 
229 	if (FIELD_OK(io_queue_size)) {
230 		opts->io_queue_size = ctrlr->opts.io_queue_size;
231 	}
232 
233 	if (FIELD_OK(io_queue_requests)) {
234 		opts->io_queue_requests = ctrlr->opts.io_queue_requests;
235 	}
236 
237 #undef FIELD_OK
238 }
239 
240 struct spdk_nvme_qpair *
241 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
242 			       const struct spdk_nvme_io_qpair_opts *user_opts,
243 			       size_t opts_size)
244 {
245 	uint32_t				qid;
246 	struct spdk_nvme_qpair			*qpair;
247 	union spdk_nvme_cc_register		cc;
248 	struct spdk_nvme_io_qpair_opts		opts;
249 
250 	if (!ctrlr) {
251 		return NULL;
252 	}
253 
254 	/*
255 	 * Get the default options, then overwrite them with the user-provided options
256 	 * up to opts_size.
257 	 *
258 	 * This allows for extensions of the opts structure without breaking
259 	 * ABI compatibility.
260 	 */
261 	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
262 	if (user_opts) {
263 		memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size));
264 	}
265 
266 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
267 	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
268 		SPDK_ERRLOG("get_cc failed\n");
269 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
270 		return NULL;
271 	}
272 
273 	/* Only the low 2 bits (values 0, 1, 2, 3) of QPRIO are valid. */
274 	if ((opts.qprio & 3) != opts.qprio) {
275 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
276 		return NULL;
277 	}
278 
279 	/*
280 	 * Only the value SPDK_NVME_QPRIO_URGENT (0) is valid when the
281 	 * default round robin arbitration method is in use.
282 	 */
283 	if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts.qprio != SPDK_NVME_QPRIO_URGENT)) {
284 		SPDK_ERRLOG("invalid queue priority for default round robin arbitration method\n");
285 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
286 		return NULL;
287 	}
288 
289 	/*
290 	 * Get the first available I/O queue ID.
291 	 */
292 	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
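	/*
	 * Note (added): the search starts at index 1 because QID 0 is reserved for the
	 * admin queue.  If no free QID remains, spdk_bit_array_find_first_set() is
	 * expected to return UINT32_MAX, which fails the range check below.
	 */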
293 	if (qid > ctrlr->opts.num_io_queues) {
294 		SPDK_ERRLOG("No free I/O queue IDs\n");
295 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
296 		return NULL;
297 	}
298 
299 	qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, &opts);
300 	if (qpair == NULL) {
301 		SPDK_ERRLOG("nvme_transport_ctrlr_create_io_qpair() failed\n");
302 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
303 		return NULL;
304 	}
305 	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
306 	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
307 
308 	nvme_ctrlr_proc_add_io_qpair(qpair);
309 
310 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
311 
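	/*
	 * Note (added): devices flagged with this quirk appear to need a brief settling
	 * delay after I/O queue creation before the new queue can be used reliably.
	 */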
312 	if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC) {
313 		spdk_delay_us(100);
314 	}
315 
316 	return qpair;
317 }
318 
319 int
320 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
321 {
322 	struct spdk_nvme_ctrlr *ctrlr;
323 
324 	if (qpair == NULL) {
325 		return 0;
326 	}
327 
328 	ctrlr = qpair->ctrlr;
329 
330 	if (qpair->in_completion_context) {
331 		/*
332 		 * There are many cases where it is convenient to delete an io qpair in the context
333 		 *  of that qpair's completion routine.  To handle this properly, set a flag here
334 		 *  so that the completion routine will perform an actual delete after the context
335 		 *  unwinds.
336 		 */
337 		qpair->delete_after_completion_context = 1;
338 		return 0;
339 	}
340 
341 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
342 
343 	nvme_ctrlr_proc_remove_io_qpair(qpair);
344 
345 	TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
346 	spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);
347 
348 	if (nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair)) {
349 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
350 		return -1;
351 	}
352 
353 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
354 	return 0;
355 }
356 
357 static void
358 nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
359 		struct spdk_nvme_intel_log_page_directory *log_page_directory)
360 {
361 	if (log_page_directory == NULL) {
362 		return;
363 	}
364 
365 	if (ctrlr->cdata.vid != SPDK_PCI_VID_INTEL) {
366 		return;
367 	}
368 
369 	ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY] = true;
370 
371 	if (log_page_directory->read_latency_log_len ||
372 	    (ctrlr->quirks & NVME_INTEL_QUIRK_READ_LATENCY)) {
373 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] = true;
374 	}
375 	if (log_page_directory->write_latency_log_len ||
376 	    (ctrlr->quirks & NVME_INTEL_QUIRK_WRITE_LATENCY)) {
377 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] = true;
378 	}
379 	if (log_page_directory->temperature_statistics_log_len) {
380 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] = true;
381 	}
382 	if (log_page_directory->smart_log_len) {
383 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_SMART] = true;
384 	}
385 	if (log_page_directory->marketing_description_log_len) {
386 		ctrlr->log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] = true;
387 	}
388 }
389 
390 static int nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
391 {
392 	int rc = 0;
393 	uint64_t phys_addr = 0;
394 	struct nvme_completion_poll_status	status;
395 	struct spdk_nvme_intel_log_page_directory *log_page_directory;
396 
397 	log_page_directory = spdk_zmalloc(sizeof(struct spdk_nvme_intel_log_page_directory),
398 					  64, &phys_addr, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
399 	if (log_page_directory == NULL) {
400 		SPDK_ERRLOG("could not allocate log_page_directory\n");
401 		return -ENXIO;
402 	}
403 
404 	rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
405 					      SPDK_NVME_GLOBAL_NS_TAG, log_page_directory,
406 					      sizeof(struct spdk_nvme_intel_log_page_directory),
407 					      0, nvme_completion_poll_cb, &status);
408 	if (rc != 0) {
409 		spdk_free(log_page_directory);
410 		return rc;
411 	}
412 
413 	if (spdk_nvme_wait_for_completion_timeout(ctrlr->adminq, &status,
414 			ctrlr->opts.admin_timeout_ms / 1000)) {
415 		spdk_free(log_page_directory);
416 		SPDK_WARNLOG("Intel log pages not supported on Intel drive!\n");
417 		return 0;
418 	}
419 
420 	nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, log_page_directory);
421 	spdk_free(log_page_directory);
422 	return 0;
423 }
424 
425 static int
426 nvme_ctrlr_set_supported_log_pages(struct spdk_nvme_ctrlr *ctrlr)
427 {
428 	int	rc = 0;
429 
430 	memset(ctrlr->log_page_supported, 0, sizeof(ctrlr->log_page_supported));
431 	/* Mandatory pages */
432 	ctrlr->log_page_supported[SPDK_NVME_LOG_ERROR] = true;
433 	ctrlr->log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] = true;
434 	ctrlr->log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] = true;
435 	if (ctrlr->cdata.lpa.celp) {
436 		ctrlr->log_page_supported[SPDK_NVME_LOG_COMMAND_EFFECTS_LOG] = true;
437 	}
438 	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL && !(ctrlr->quirks & NVME_INTEL_QUIRK_NO_LOG_PAGES)) {
439 		rc = nvme_ctrlr_set_intel_support_log_pages(ctrlr);
440 	}
441 
442 	return rc;
443 }
444 
445 static void
446 nvme_ctrlr_set_intel_supported_features(struct spdk_nvme_ctrlr *ctrlr)
447 {
448 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_MAX_LBA] = true;
449 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_NATIVE_MAX_LBA] = true;
450 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_POWER_GOVERNOR_SETTING] = true;
451 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_SMBUS_ADDRESS] = true;
452 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LED_PATTERN] = true;
453 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_RESET_TIMED_WORKLOAD_COUNTERS] = true;
454 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING] = true;
455 }
456 
457 static void
458 nvme_ctrlr_set_supported_features(struct spdk_nvme_ctrlr *ctrlr)
459 {
460 	memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported));
461 	/* Mandatory features */
462 	ctrlr->feature_supported[SPDK_NVME_FEAT_ARBITRATION] = true;
463 	ctrlr->feature_supported[SPDK_NVME_FEAT_POWER_MANAGEMENT] = true;
464 	ctrlr->feature_supported[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD] = true;
465 	ctrlr->feature_supported[SPDK_NVME_FEAT_ERROR_RECOVERY] = true;
466 	ctrlr->feature_supported[SPDK_NVME_FEAT_NUMBER_OF_QUEUES] = true;
467 	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_COALESCING] = true;
468 	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION] = true;
469 	ctrlr->feature_supported[SPDK_NVME_FEAT_WRITE_ATOMICITY] = true;
470 	ctrlr->feature_supported[SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION] = true;
471 	/* Optional features */
472 	if (ctrlr->cdata.vwc.present) {
473 		ctrlr->feature_supported[SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE] = true;
474 	}
475 	if (ctrlr->cdata.apsta.supported) {
476 		ctrlr->feature_supported[SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION] = true;
477 	}
478 	if (ctrlr->cdata.hmpre) {
479 		ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_MEM_BUFFER] = true;
480 	}
481 	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) {
482 		nvme_ctrlr_set_intel_supported_features(ctrlr);
483 	}
484 }
485 
486 void
487 nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
488 {
489 	/*
490 	 * Set the flag here and leave the actual failing of the qpairs to
491 	 * spdk_nvme_qpair_process_completions().
492 	 */
493 	if (hot_remove) {
494 		ctrlr->is_removed = true;
495 	}
496 	ctrlr->is_failed = true;
497 	SPDK_ERRLOG("ctrlr %s in failed state.\n", ctrlr->trid.traddr);
498 }
499 
500 static void
501 nvme_ctrlr_shutdown(struct spdk_nvme_ctrlr *ctrlr)
502 {
503 	union spdk_nvme_cc_register	cc;
504 	union spdk_nvme_csts_register	csts;
505 	uint32_t			ms_waited = 0;
506 	uint32_t			shutdown_timeout_ms;
507 
508 	if (ctrlr->is_removed) {
509 		return;
510 	}
511 
512 	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
513 		SPDK_ERRLOG("get_cc() failed\n");
514 		return;
515 	}
516 
517 	cc.bits.shn = SPDK_NVME_SHN_NORMAL;
518 
519 	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
520 		SPDK_ERRLOG("set_cc() failed\n");
521 		return;
522 	}
523 
524 	/*
525 	 * The NVMe specification defines RTD3E as the time from setting
526 	 *  SHN = 1 (normal shutdown) until the controller sets SHST = 10b.
527 	 * If the device doesn't report RTD3 entry latency, or if it
528 	 *  reports RTD3 entry latency less than 10 seconds, pick
529 	 *  10 seconds as a reasonable amount of time to
530 	 *  wait before proceeding.
531 	 */
532 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "RTD3E = %" PRIu32 " us\n", ctrlr->cdata.rtd3e);
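	/*
	 * Note (added): RTD3E is reported in microseconds; round it up to milliseconds
	 * and then apply the 10 second minimum described above.
	 */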
533 	shutdown_timeout_ms = (ctrlr->cdata.rtd3e + 999) / 1000;
534 	shutdown_timeout_ms = spdk_max(shutdown_timeout_ms, 10000);
535 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "shutdown timeout = %" PRIu32 " ms\n", shutdown_timeout_ms);
536 
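	/*
	 * Note (added): poll CSTS.SHST roughly once per millisecond (nvme_delay() takes
	 * microseconds) until shutdown completes or the timeout expires.
	 */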
537 	do {
538 		if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
539 			SPDK_ERRLOG("get_csts() failed\n");
540 			return;
541 		}
542 
543 		if (csts.bits.shst == SPDK_NVME_SHST_COMPLETE) {
544 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "shutdown complete in %u milliseconds\n",
545 				      ms_waited);
546 			ctrlr->is_shutdown = true;
547 			return;
548 		}
549 
550 		nvme_delay(1000);
551 		ms_waited++;
552 	} while (ms_waited < shutdown_timeout_ms);
553 
554 	SPDK_ERRLOG("did not shutdown within %u milliseconds\n", shutdown_timeout_ms);
555 	if (ctrlr->quirks & NVME_QUIRK_SHST_COMPLETE) {
556 		SPDK_ERRLOG("likely due to shutdown handling in the VMWare emulated NVMe SSD\n");
557 	}
558 }
559 
560 static int
561 nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
562 {
563 	union spdk_nvme_cc_register	cc;
564 	int				rc;
565 
566 	rc = nvme_transport_ctrlr_enable(ctrlr);
567 	if (rc != 0) {
568 		SPDK_ERRLOG("transport ctrlr_enable failed\n");
569 		return rc;
570 	}
571 
572 	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
573 		SPDK_ERRLOG("get_cc() failed\n");
574 		return -EIO;
575 	}
576 
577 	if (cc.bits.en != 0) {
578 		SPDK_ERRLOG("%s called with CC.EN = 1\n", __func__);
579 		return -EINVAL;
580 	}
581 
582 	cc.bits.en = 1;
583 	cc.bits.css = 0;
584 	cc.bits.shn = 0;
585 	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
586 	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
587 
588 	/* Page size is 2 ^ (12 + mps). */
589 	cc.bits.mps = spdk_u32log2(ctrlr->page_size) - 12;
590 
591 	if (ctrlr->cap.bits.css == 0) {
592 		SPDK_INFOLOG(SPDK_LOG_NVME,
593 			     "Drive reports no command sets supported. Assuming NVM is supported.\n");
594 		ctrlr->cap.bits.css = SPDK_NVME_CAP_CSS_NVM;
595 	}
596 
597 	if (!(ctrlr->cap.bits.css & (1u << ctrlr->opts.command_set))) {
598 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Requested I/O command set %u but supported mask is 0x%x\n",
599 			      ctrlr->opts.command_set, ctrlr->cap.bits.css);
600 		return -EINVAL;
601 	}
602 
603 	cc.bits.css = ctrlr->opts.command_set;
604 
605 	switch (ctrlr->opts.arb_mechanism) {
606 	case SPDK_NVME_CC_AMS_RR:
607 		break;
608 	case SPDK_NVME_CC_AMS_WRR:
609 		if (SPDK_NVME_CAP_AMS_WRR & ctrlr->cap.bits.ams) {
610 			break;
611 		}
612 		return -EINVAL;
613 	case SPDK_NVME_CC_AMS_VS:
614 		if (SPDK_NVME_CAP_AMS_VS & ctrlr->cap.bits.ams) {
615 			break;
616 		}
617 		return -EINVAL;
618 	default:
619 		return -EINVAL;
620 	}
621 
622 	cc.bits.ams = ctrlr->opts.arb_mechanism;
623 
624 	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
625 		SPDK_ERRLOG("set_cc() failed\n");
626 		return -EIO;
627 	}
628 
629 	return 0;
630 }
631 
632 #ifdef DEBUG
633 static const char *
634 nvme_ctrlr_state_string(enum nvme_ctrlr_state state)
635 {
636 	switch (state) {
637 	case NVME_CTRLR_STATE_INIT_DELAY:
638 		return "delay init";
639 	case NVME_CTRLR_STATE_INIT:
640 		return "init";
641 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
642 		return "disable and wait for CSTS.RDY = 1";
643 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
644 		return "disable and wait for CSTS.RDY = 0";
645 	case NVME_CTRLR_STATE_ENABLE:
646 		return "enable controller by writing CC.EN = 1";
647 	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
648 		return "wait for CSTS.RDY = 1";
649 	case NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE:
650 		return "enable admin queue";
651 	case NVME_CTRLR_STATE_IDENTIFY:
652 		return "identify controller";
653 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
654 		return "wait for identify controller";
655 	case NVME_CTRLR_STATE_SET_NUM_QUEUES:
656 		return "set number of queues";
657 	case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
658 		return "wait for set number of queues";
659 	case NVME_CTRLR_STATE_GET_NUM_QUEUES:
660 		return "get number of queues";
661 	case NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES:
662 		return "wait for get number of queues";
663 	case NVME_CTRLR_STATE_CONSTRUCT_NS:
664 		return "construct namespaces";
665 	case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
666 		return "identify active ns";
667 	case NVME_CTRLR_STATE_IDENTIFY_NS:
668 		return "identify ns";
669 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
670 		return "wait for identify ns";
671 	case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
672 		return "identify namespace id descriptors";
673 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
674 		return "wait for identify namespace id descriptors";
675 	case NVME_CTRLR_STATE_CONFIGURE_AER:
676 		return "configure AER";
677 	case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
678 		return "wait for configure aer";
679 	case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
680 		return "set supported log pages";
681 	case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
682 		return "set supported features";
683 	case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
684 		return "set doorbell buffer config";
685 	case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
686 		return "wait for doorbell buffer config";
687 	case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
688 		return "set keep alive timeout";
689 	case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
690 		return "wait for set keep alive timeout";
691 	case NVME_CTRLR_STATE_SET_HOST_ID:
692 		return "set host ID";
693 	case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
694 		return "wait for set host ID";
695 	case NVME_CTRLR_STATE_READY:
696 		return "ready";
697 	case NVME_CTRLR_STATE_ERROR:
698 		return "error";
699 	}
700 	return "unknown";
701 }
702 #endif /* DEBUG */
703 
704 static void
705 nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
706 		     uint64_t timeout_in_ms)
707 {
708 	ctrlr->state = state;
709 	if (timeout_in_ms == 0) {
710 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "setting state to %s (no timeout)\n",
711 			      nvme_ctrlr_state_string(ctrlr->state));
712 		ctrlr->state_timeout_tsc = NVME_TIMEOUT_INFINITE;
713 	} else {
714 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "setting state to %s (timeout %" PRIu64 " ms)\n",
715 			      nvme_ctrlr_state_string(ctrlr->state), timeout_in_ms);
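		/* Note (added): convert the relative timeout in milliseconds into an absolute deadline in ticks. */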
716 		ctrlr->state_timeout_tsc = spdk_get_ticks() + (timeout_in_ms * spdk_get_ticks_hz()) / 1000;
717 	}
718 }
719 
720 static void
721 nvme_ctrlr_free_doorbell_buffer(struct spdk_nvme_ctrlr *ctrlr)
722 {
723 	if (ctrlr->shadow_doorbell) {
724 		spdk_dma_free(ctrlr->shadow_doorbell);
725 		ctrlr->shadow_doorbell = NULL;
726 	}
727 
728 	if (ctrlr->eventidx) {
729 		spdk_dma_free(ctrlr->eventidx);
730 		ctrlr->eventidx = NULL;
731 	}
732 }
733 
734 static void
735 nvme_ctrlr_set_doorbell_buffer_config_done(void *arg, const struct spdk_nvme_cpl *cpl)
736 {
737 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
738 
739 	if (spdk_nvme_cpl_is_error(cpl)) {
740 		SPDK_WARNLOG("Doorbell buffer config failed\n");
741 	} else {
742 		SPDK_INFOLOG(SPDK_LOG_NVME, "NVMe controller: %s doorbell buffer config enabled\n",
743 			     ctrlr->trid.traddr);
744 	}
745 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
746 			     ctrlr->opts.admin_timeout_ms);
747 }
748 
749 static int
750 nvme_ctrlr_set_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr)
751 {
752 	int rc = 0;
753 	uint64_t prp1, prp2;
754 
755 	if (!ctrlr->cdata.oacs.doorbell_buffer_config) {
756 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
757 				     ctrlr->opts.admin_timeout_ms);
758 		return 0;
759 	}
760 
761 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
762 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
763 				     ctrlr->opts.admin_timeout_ms);
764 		return 0;
765 	}
766 
767 	/* The shadow doorbell and EventIdx buffers each occupy exactly one page. */
768 	ctrlr->shadow_doorbell = spdk_dma_zmalloc(ctrlr->page_size, ctrlr->page_size,
769 				 &prp1);
770 	if (ctrlr->shadow_doorbell == NULL) {
771 		rc = -ENOMEM;
772 		goto error;
773 	}
774 
775 	ctrlr->eventidx = spdk_dma_zmalloc(ctrlr->page_size, ctrlr->page_size, &prp2);
776 	if (ctrlr->eventidx == NULL) {
777 		rc = -ENOMEM;
778 		goto error;
779 	}
780 
781 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
782 			     ctrlr->opts.admin_timeout_ms);
783 
784 	rc = nvme_ctrlr_cmd_doorbell_buffer_config(ctrlr, prp1, prp2,
785 			nvme_ctrlr_set_doorbell_buffer_config_done, ctrlr);
786 	if (rc != 0) {
787 		goto error;
788 	}
789 
790 	return 0;
791 
792 error:
793 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
794 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
795 	return rc;
796 }
797 
798 int
799 spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
800 {
801 	int rc = 0;
802 	struct spdk_nvme_qpair	*qpair;
803 	struct nvme_request	*req, *tmp;
804 
805 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
806 
807 	if (ctrlr->is_resetting || ctrlr->is_failed) {
808 		/*
809 		 * Controller is already resetting or has failed.  Return
810 		 *  immediately since there is no need to kick off another
811 		 *  reset in these cases.
812 		 */
813 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
814 		return 0;
815 	}
816 
817 	ctrlr->is_resetting = true;
818 
819 	SPDK_NOTICELOG("resetting controller\n");
820 
821 	/* Free all of the queued abort requests */
822 	STAILQ_FOREACH_SAFE(req, &ctrlr->queued_aborts, stailq, tmp) {
823 		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
824 		nvme_free_request(req);
825 		ctrlr->outstanding_aborts--;
826 	}
827 
828 	/* Disable all queues before disabling the controller hardware. */
829 	nvme_qpair_disable(ctrlr->adminq);
830 	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
831 		nvme_qpair_disable(qpair);
832 	}
833 
834 	/* Doorbell buffer config is invalid during reset */
835 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
836 
837 	/* Set the state back to INIT to cause a full hardware reset. */
838 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
839 
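	/*
	 * Note (added): drive the initialization state machine synchronously here;
	 * nvme_ctrlr_process_init() advances the controller one step at a time until
	 * it reaches the READY (or ERROR) state.
	 */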
840 	while (ctrlr->state != NVME_CTRLR_STATE_READY) {
841 		if (nvme_ctrlr_process_init(ctrlr) != 0) {
842 			SPDK_ERRLOG("%s: controller reinitialization failed\n", __func__);
843 			nvme_ctrlr_fail(ctrlr, false);
844 			rc = -1;
845 			break;
846 		}
847 	}
848 
849 	if (!ctrlr->is_failed) {
850 		/* Reinitialize qpairs */
851 		TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
852 			if (nvme_transport_ctrlr_reinit_io_qpair(ctrlr, qpair) != 0) {
853 				nvme_ctrlr_fail(ctrlr, false);
854 				rc = -1;
855 			}
856 		}
857 	}
858 
859 	ctrlr->is_resetting = false;
860 
861 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
862 
863 	return rc;
864 }
865 
866 static void
867 nvme_ctrlr_identify_done(void *arg, const struct spdk_nvme_cpl *cpl)
868 {
869 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
870 
871 	if (spdk_nvme_cpl_is_error(cpl)) {
872 		SPDK_ERRLOG("nvme_identify_controller failed!\n");
873 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
874 		return;
875 	}
876 
877 	/*
878 	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
879 	 *  controller supports.
880 	 */
881 	ctrlr->max_xfer_size = nvme_transport_ctrlr_get_max_xfer_size(ctrlr);
882 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "transport max_xfer_size %u\n", ctrlr->max_xfer_size);
883 	if (ctrlr->cdata.mdts > 0) {
884 		ctrlr->max_xfer_size = spdk_min(ctrlr->max_xfer_size,
885 						ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
886 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size);
887 	}
888 
889 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "CNTLID 0x%04" PRIx16 "\n", ctrlr->cdata.cntlid);
890 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
891 		ctrlr->cntlid = ctrlr->cdata.cntlid;
892 	} else {
893 		/*
894 		 * Fabrics controllers should already have CNTLID from the Connect command.
895 		 *
896 		 * If CNTLID from Connect doesn't match CNTLID in the Identify Controller data,
897 		 * trust the one from Connect.
898 		 */
899 		if (ctrlr->cntlid != ctrlr->cdata.cntlid) {
900 			SPDK_DEBUGLOG(SPDK_LOG_NVME,
901 				      "Identify CNTLID 0x%04" PRIx16 " != Connect CNTLID 0x%04" PRIx16 "\n",
902 				      ctrlr->cdata.cntlid, ctrlr->cntlid);
903 		}
904 	}
905 
906 	if (ctrlr->cdata.sgls.supported) {
907 		ctrlr->flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
908 		ctrlr->max_sges = nvme_transport_ctrlr_get_max_sges(ctrlr);
909 	}
910 
911 	if (ctrlr->cdata.oacs.security) {
912 		ctrlr->flags |= SPDK_NVME_CTRLR_SECURITY_SEND_RECV_SUPPORTED;
913 	}
914 
915 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
916 			     ctrlr->opts.admin_timeout_ms);
917 }
918 
919 static int
920 nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
921 {
922 	int	rc;
923 
924 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
925 			     ctrlr->opts.admin_timeout_ms);
926 
927 	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
928 				     &ctrlr->cdata, sizeof(ctrlr->cdata),
929 				     nvme_ctrlr_identify_done, ctrlr);
930 	if (rc != 0) {
931 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
932 		return rc;
933 	}
934 
935 	return 0;
936 }
937 
938 int
939 nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
940 {
941 	struct nvme_completion_poll_status	status;
942 	int					rc;
943 	uint32_t				i;
944 	uint32_t				num_pages;
945 	uint32_t				next_nsid = 0;
946 	uint32_t				*new_ns_list = NULL;
947 
948 	if (ctrlr->num_ns == 0) {
949 		spdk_dma_free(ctrlr->active_ns_list);
950 		ctrlr->active_ns_list = NULL;
951 
952 		return 0;
953 	}
954 
955 	/*
956 	 * The allocated size must be a multiple of sizeof(struct spdk_nvme_ns_list)
957 	 */
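	/*
	 * Note (added): each spdk_nvme_ns_list holds 1024 32-bit NSIDs, so this computes
	 * the number of lists needed to cover num_ns entries, rounded up.
	 */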
958 	num_pages = (ctrlr->num_ns * sizeof(new_ns_list[0]) - 1) / sizeof(struct spdk_nvme_ns_list) + 1;
959 	new_ns_list = spdk_dma_zmalloc(num_pages * sizeof(struct spdk_nvme_ns_list), ctrlr->page_size,
960 				       NULL);
961 	if (!new_ns_list) {
962 		SPDK_ERRLOG("Failed to allocate active_ns_list!\n");
963 		return -ENOMEM;
964 	}
965 
966 	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 1, 0) && !(ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
967 		/*
968 		 * Iterate through the pages and fetch each chunk of 1024 namespaces until
969 		 * there are no more active namespaces
970 		 */
971 		for (i = 0; i < num_pages; i++) {
972 			rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST, 0, next_nsid,
973 						     &new_ns_list[1024 * i], sizeof(struct spdk_nvme_ns_list),
974 						     nvme_completion_poll_cb, &status);
975 			if (rc != 0) {
976 				goto fail;
977 			}
978 			if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
979 				SPDK_ERRLOG("nvme_ctrlr_cmd_identify_active_ns_list failed!\n");
980 				rc = -ENXIO;
981 				goto fail;
982 			}
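			/*
			 * Note (added): record the last NSID returned in this chunk; the next
			 * Identify Active NS List request uses it as its starting point and
			 * should return only namespaces with IDs greater than it.
			 */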
983 			next_nsid = new_ns_list[1024 * i + 1023];
984 			if (next_nsid == 0) {
985 				/*
986 				 * No more active namespaces found, no need to fetch additional chunks
987 				 */
988 				break;
989 			}
990 		}
991 
992 	} else {
993 		/*
994 		 * The controller doesn't support the Active Namespace ID list (CNS 0x02),
995 		 * so synthesize a list that treats namespaces 1 through num_ns as active.
996 		 */
997 		for (i = 0; i < ctrlr->num_ns; i++) {
998 			new_ns_list[i] = i + 1;
999 		}
1000 	}
1001 
1002 	/*
1003 	 * Now that the list is properly set up, we can swap it into the ctrlr and
1004 	 * free up the previous one.
1005 	 */
1006 	spdk_dma_free(ctrlr->active_ns_list);
1007 	ctrlr->active_ns_list = new_ns_list;
1008 
1009 	return 0;
1010 fail:
1011 	spdk_dma_free(new_ns_list);
1012 	return rc;
1013 }
1014 
1015 static void
1016 nvme_ctrlr_identify_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
1017 {
1018 	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
1019 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1020 	uint32_t nsid;
1021 	int rc;
1022 
1023 	if (spdk_nvme_cpl_is_error(cpl)) {
1024 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1025 		return;
1026 	} else {
1027 		nvme_ns_set_identify_data(ns);
1028 	}
1029 
1030 	/* move on to the next active NS */
1031 	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
1032 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1033 	if (ns == NULL) {
1034 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
1035 				     ctrlr->opts.admin_timeout_ms);
1036 		return;
1037 	}
1038 	ns->ctrlr = ctrlr;
1039 	ns->id = nsid;
1040 
1041 	rc = nvme_ctrlr_identify_ns_async(ns);
1042 	if (rc) {
1043 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1044 	}
1045 }
1046 
1047 static int
1048 nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns)
1049 {
1050 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1051 	struct spdk_nvme_ns_data *nsdata;
1052 
1053 	nsdata = &ctrlr->nsdata[ns->id - 1];
1054 
1055 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
1056 			     ctrlr->opts.admin_timeout_ms);
1057 	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id,
1058 				       nsdata, sizeof(*nsdata),
1059 				       nvme_ctrlr_identify_ns_async_done, ns);
1060 }
1061 
1062 static int
1063 nvme_ctrlr_identify_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1064 {
1065 	uint32_t nsid;
1066 	struct spdk_nvme_ns *ns;
1067 	int rc;
1068 
1069 	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
1070 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1071 	if (ns == NULL) {
1072 		/* No active NS, move on to the next state */
1073 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1074 				     ctrlr->opts.admin_timeout_ms);
1075 		return 0;
1076 	}
1077 
1078 	ns->ctrlr = ctrlr;
1079 	ns->id = nsid;
1080 
1081 	rc = nvme_ctrlr_identify_ns_async(ns);
1082 	if (rc) {
1083 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1084 	}
1085 
1086 	return rc;
1087 }
1088 
1089 static void
1090 nvme_ctrlr_identify_id_desc_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
1091 {
1092 	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
1093 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1094 	uint32_t nsid;
1095 	int rc;
1096 
1097 	if (spdk_nvme_cpl_is_error(cpl)) {
1098 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1099 				     ctrlr->opts.admin_timeout_ms);
1100 		return;
1101 	}
1102 
1103 	/* move on to the next active NS */
1104 	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
1105 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1106 	if (ns == NULL) {
1107 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1108 				     ctrlr->opts.admin_timeout_ms);
1109 		return;
1110 	}
1111 
1112 	rc = nvme_ctrlr_identify_id_desc_async(ns);
1113 	if (rc) {
1114 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1115 	}
1116 }
1117 
1118 static int
1119 nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns)
1120 {
1121 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1122 
1123 	memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
1124 
1125 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
1126 			     ctrlr->opts.admin_timeout_ms);
1127 	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST,
1128 				       0, ns->id, ns->id_desc_list, sizeof(ns->id_desc_list),
1129 				       nvme_ctrlr_identify_id_desc_async_done, ns);
1130 }
1131 
1132 static int
1133 nvme_ctrlr_identify_id_desc_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1134 {
1135 	uint32_t nsid;
1136 	struct spdk_nvme_ns *ns;
1137 	int rc;
1138 
1139 	if (ctrlr->vs.raw < SPDK_NVME_VERSION(1, 3, 0) ||
1140 	    (ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
1141 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "NVMe version < 1.3 or quirk set; not attempting to retrieve NS ID Descriptor List\n");
1142 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1143 				     ctrlr->opts.admin_timeout_ms);
1144 		return 0;
1145 	}
1146 
1147 	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
1148 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1149 	if (ns == NULL) {
1150 		/* No active NS, move on to the next state */
1151 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1152 				     ctrlr->opts.admin_timeout_ms);
1153 		return 0;
1154 	}
1155 
1156 	rc = nvme_ctrlr_identify_id_desc_async(ns);
1157 	if (rc) {
1158 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1159 	}
1160 
1161 	return rc;
1162 }
1163 
1164 static void
1165 nvme_ctrlr_set_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
1166 {
1167 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1168 
1169 	if (spdk_nvme_cpl_is_error(cpl)) {
1170 		SPDK_ERRLOG("Set Features - Number of Queues failed!\n");
1171 	}
1172 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_GET_NUM_QUEUES,
1173 			     ctrlr->opts.admin_timeout_ms);
1174 }
1175 
1176 static int
1177 nvme_ctrlr_set_num_queues(struct spdk_nvme_ctrlr *ctrlr)
1178 {
1179 	int rc;
1180 
1181 	if (ctrlr->opts.num_io_queues > SPDK_NVME_MAX_IO_QUEUES) {
1182 		SPDK_NOTICELOG("Limiting requested num_io_queues %u to max %d\n",
1183 			       ctrlr->opts.num_io_queues, SPDK_NVME_MAX_IO_QUEUES);
1184 		ctrlr->opts.num_io_queues = SPDK_NVME_MAX_IO_QUEUES;
1185 	} else if (ctrlr->opts.num_io_queues < 1) {
1186 		SPDK_NOTICELOG("Requested num_io_queues 0, increasing to 1\n");
1187 		ctrlr->opts.num_io_queues = 1;
1188 	}
1189 
1190 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
1191 			     ctrlr->opts.admin_timeout_ms);
1192 
1193 	rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->opts.num_io_queues,
1194 					   nvme_ctrlr_set_num_queues_done, ctrlr);
1195 	if (rc != 0) {
1196 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1197 		return rc;
1198 	}
1199 
1200 	return 0;
1201 }
1202 
1203 static void
1204 nvme_ctrlr_get_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
1205 {
1206 	uint32_t cq_allocated, sq_allocated, min_allocated, i;
1207 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1208 
1209 	if (spdk_nvme_cpl_is_error(cpl)) {
1210 		SPDK_ERRLOG("Get Features - Number of Queues failed!\n");
1211 		ctrlr->opts.num_io_queues = 0;
1212 	} else {
1213 		/*
1214 		 * Data in cdw0 is 0-based.
1215 		 * Lower 16-bits indicate number of submission queues allocated.
1216 		 * Upper 16-bits indicate number of completion queues allocated.
1217 		 */
1218 		sq_allocated = (cpl->cdw0 & 0xFFFF) + 1;
1219 		cq_allocated = (cpl->cdw0 >> 16) + 1;
1220 
1221 		/*
1222 		 * For 1:1 queue mapping, set number of allocated queues to be minimum of
1223 		 * submission and completion queues.
1224 		 */
1225 		min_allocated = spdk_min(sq_allocated, cq_allocated);
1226 
1227 		/* Set number of queues to be minimum of requested and actually allocated. */
1228 		ctrlr->opts.num_io_queues = spdk_min(min_allocated, ctrlr->opts.num_io_queues);
1229 	}
1230 
1231 	ctrlr->free_io_qids = spdk_bit_array_create(ctrlr->opts.num_io_queues + 1);
1232 	if (ctrlr->free_io_qids == NULL) {
1233 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1234 		return;
1235 	}
1236 
1237 	/* Initialize list of free I/O queue IDs. QID 0 is the admin queue. */
1238 	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
1239 	for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
1240 		spdk_bit_array_set(ctrlr->free_io_qids, i);
1241 	}
1242 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONSTRUCT_NS,
1243 			     ctrlr->opts.admin_timeout_ms);
1244 }
1245 
1246 static int
1247 nvme_ctrlr_get_num_queues(struct spdk_nvme_ctrlr *ctrlr)
1248 {
1249 	int rc;
1250 
1251 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES,
1252 			     ctrlr->opts.admin_timeout_ms);
1253 
1254 	/* Obtain the number of queues allocated using Get Features. */
1255 	rc = nvme_ctrlr_cmd_get_num_queues(ctrlr, nvme_ctrlr_get_num_queues_done, ctrlr);
1256 	if (rc != 0) {
1257 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1258 		return rc;
1259 	}
1260 
1261 	return 0;
1262 }
1263 
1264 static void
1265 nvme_ctrlr_set_keep_alive_timeout_done(void *arg, const struct spdk_nvme_cpl *cpl)
1266 {
1267 	uint32_t keep_alive_interval_ms;
1268 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1269 
1270 	if (spdk_nvme_cpl_is_error(cpl)) {
1271 		SPDK_ERRLOG("Keep alive timeout Get Feature failed: SC %x SCT %x\n",
1272 			    cpl->status.sc, cpl->status.sct);
1273 		ctrlr->opts.keep_alive_timeout_ms = 0;
1274 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1275 		return;
1276 	}
1277 
1278 	if (ctrlr->opts.keep_alive_timeout_ms != cpl->cdw0) {
1279 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Controller adjusted keep alive timeout to %u ms\n",
1280 			      cpl->cdw0);
1281 	}
1282 
1283 	ctrlr->opts.keep_alive_timeout_ms = cpl->cdw0;
1284 
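	/*
	 * Note (added): keep alives are sent at half the negotiated timeout, presumably so
	 * that a single delayed submission does not let the controller's keep alive timer expire.
	 */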
1285 	keep_alive_interval_ms = ctrlr->opts.keep_alive_timeout_ms / 2;
1286 	if (keep_alive_interval_ms == 0) {
1287 		keep_alive_interval_ms = 1;
1288 	}
1289 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Sending keep alive every %u ms\n", keep_alive_interval_ms);
1290 
1291 	ctrlr->keep_alive_interval_ticks = (keep_alive_interval_ms * spdk_get_ticks_hz()) / UINT64_C(1000);
1292 
1293 	/* Schedule the first Keep Alive to be sent as soon as possible. */
1294 	ctrlr->next_keep_alive_tick = spdk_get_ticks();
1295 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
1296 			     ctrlr->opts.admin_timeout_ms);
1297 }
1298 
1299 static int
1300 nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
1301 {
1302 	int rc;
1303 
1304 	if (ctrlr->opts.keep_alive_timeout_ms == 0) {
1305 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
1306 				     ctrlr->opts.admin_timeout_ms);
1307 		return 0;
1308 	}
1309 
1310 	if (ctrlr->cdata.kas == 0) {
1311 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Controller KAS is 0 - not enabling Keep Alive\n");
1312 		ctrlr->opts.keep_alive_timeout_ms = 0;
1313 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
1314 				     ctrlr->opts.admin_timeout_ms);
1315 		return 0;
1316 	}
1317 
1318 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
1319 			     ctrlr->opts.admin_timeout_ms);
1320 
1321 	/* Retrieve actual keep alive timeout, since the controller may have adjusted it. */
1322 	rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0,
1323 					     nvme_ctrlr_set_keep_alive_timeout_done, ctrlr);
1324 	if (rc != 0) {
1325 		SPDK_ERRLOG("Keep alive timeout Get Feature failed: %d\n", rc);
1326 		ctrlr->opts.keep_alive_timeout_ms = 0;
1327 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1328 		return rc;
1329 	}
1330 
1331 	return 0;
1332 }
1333 
1334 static void
1335 nvme_ctrlr_set_host_id_done(void *arg, const struct spdk_nvme_cpl *cpl)
1336 {
1337 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1338 
1339 	if (spdk_nvme_cpl_is_error(cpl)) {
1340 		/*
1341 		 * Treat Set Features - Host ID failure as non-fatal, since the Host ID feature
1342 		 * is optional.
1343 		 */
1344 		SPDK_WARNLOG("Set Features - Host ID failed: SC 0x%x SCT 0x%x\n",
1345 			     cpl->status.sc, cpl->status.sct);
1346 	} else {
1347 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Set Features - Host ID was successful\n");
1348 	}
1349 
1350 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
1351 }
1352 
1353 static int
1354 nvme_ctrlr_set_host_id(struct spdk_nvme_ctrlr *ctrlr)
1355 {
1356 	uint8_t *host_id;
1357 	uint32_t host_id_size;
1358 	int rc;
1359 
1360 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
1361 		/*
1362 		 * NVMe-oF sends the host ID during Connect and doesn't allow
1363 		 * Set Features - Host Identifier after Connect, so we don't need to do anything here.
1364 		 */
1365 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "NVMe-oF transport - not sending Set Features - Host ID\n");
1366 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
1367 		return 0;
1368 	}
1369 
1370 	if (ctrlr->cdata.ctratt.host_id_exhid_supported) {
1371 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Using 128-bit extended host identifier\n");
1372 		host_id = ctrlr->opts.extended_host_id;
1373 		host_id_size = sizeof(ctrlr->opts.extended_host_id);
1374 	} else {
1375 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Using 64-bit host identifier\n");
1376 		host_id = ctrlr->opts.host_id;
1377 		host_id_size = sizeof(ctrlr->opts.host_id);
1378 	}
1379 
1380 	/* If the user specified an all-zeroes host identifier, don't send the command. */
1381 	if (spdk_mem_all_zero(host_id, host_id_size)) {
1382 		SPDK_DEBUGLOG(SPDK_LOG_NVME,
1383 			      "User did not specify host ID - not sending Set Features - Host ID\n");
1384 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
1385 		return 0;
1386 	}
1387 
1388 	SPDK_LOGDUMP(SPDK_LOG_NVME, "host_id", host_id, host_id_size);
1389 
1390 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
1391 			     ctrlr->opts.admin_timeout_ms);
1392 
1393 	rc = nvme_ctrlr_cmd_set_host_id(ctrlr, host_id, host_id_size, nvme_ctrlr_set_host_id_done, ctrlr);
1394 	if (rc != 0) {
1395 		SPDK_ERRLOG("Set Features - Host ID failed: %d\n", rc);
1396 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1397 		return rc;
1398 	}
1399 
1400 	return 0;
1401 }
1402 
1403 static void
1404 nvme_ctrlr_destruct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1405 {
1406 	if (ctrlr->ns) {
1407 		uint32_t i, num_ns = ctrlr->num_ns;
1408 
1409 		for (i = 0; i < num_ns; i++) {
1410 			nvme_ns_destruct(&ctrlr->ns[i]);
1411 		}
1412 
1413 		spdk_free(ctrlr->ns);
1414 		ctrlr->ns = NULL;
1415 		ctrlr->num_ns = 0;
1416 	}
1417 
1418 	if (ctrlr->nsdata) {
1419 		spdk_free(ctrlr->nsdata);
1420 		ctrlr->nsdata = NULL;
1421 	}
1422 
1423 	spdk_dma_free(ctrlr->active_ns_list);
1424 	ctrlr->active_ns_list = NULL;
1425 }
1426 
1427 static void
1428 nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1429 {
1430 	uint32_t i, nn = ctrlr->cdata.nn;
1431 	struct spdk_nvme_ns_data *nsdata;
1432 
1433 	for (i = 0; i < nn; i++) {
1434 		struct spdk_nvme_ns	*ns = &ctrlr->ns[i];
1435 		uint32_t		nsid = i + 1;
1436 
1437 		nsdata = &ctrlr->nsdata[nsid - 1];
1438 
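		/*
		 * Note (added): a zero NCAP in the cached namespace data is used here as an
		 * indication that the namespace is not currently constructed.  Construct
		 * namespaces that have become active and destruct ones that are no longer active.
		 */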
1439 		if ((nsdata->ncap == 0) && spdk_nvme_ctrlr_is_active_ns(ctrlr, nsid)) {
1440 			if (nvme_ns_construct(ns, nsid, ctrlr) != 0) {
1441 				continue;
1442 			}
1443 		}
1444 
1445 		if (nsdata->ncap && !spdk_nvme_ctrlr_is_active_ns(ctrlr, nsid)) {
1446 			nvme_ns_destruct(ns);
1447 		}
1448 	}
1449 }
1450 
1451 static int
1452 nvme_ctrlr_construct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1453 {
1454 	int rc = 0;
1455 	uint32_t nn = ctrlr->cdata.nn;
1456 	uint64_t phys_addr = 0;
1457 
1458 	/* ctrlr->num_ns may be 0 (startup) or a different number of namespaces (reset),
1459 	 * so check if we need to reallocate.
1460 	 */
1461 	if (nn != ctrlr->num_ns) {
1462 		nvme_ctrlr_destruct_namespaces(ctrlr);
1463 
1464 		if (nn == 0) {
1465 			SPDK_WARNLOG("controller has 0 namespaces\n");
1466 			return 0;
1467 		}
1468 
1469 		ctrlr->ns = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns), 64,
1470 					 &phys_addr, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
1471 		if (ctrlr->ns == NULL) {
1472 			rc = -ENOMEM;
1473 			goto fail;
1474 		}
1475 
1476 		ctrlr->nsdata = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns_data), 64,
1477 					     &phys_addr, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
1478 		if (ctrlr->nsdata == NULL) {
1479 			rc = -ENOMEM;
1480 			goto fail;
1481 		}
1482 
1483 		ctrlr->num_ns = nn;
1484 	}
1485 
1486 	return 0;
1487 
1488 fail:
1489 	nvme_ctrlr_destruct_namespaces(ctrlr);
1490 	return rc;
1491 }
1492 
1493 static void
1494 nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl)
1495 {
1496 	struct nvme_async_event_request	*aer = arg;
1497 	struct spdk_nvme_ctrlr		*ctrlr = aer->ctrlr;
1498 	struct spdk_nvme_ctrlr_process	*active_proc;
1499 	union spdk_nvme_async_event_completion	event;
1500 	int					rc;
1501 
1502 	if (cpl->status.sct == SPDK_NVME_SCT_GENERIC &&
1503 	    cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) {
1504 		/*
1505 		 *  This is simulated when controller is being shut down, to
1506 		 *  effectively abort outstanding asynchronous event requests
1507 		 *  and make sure all memory is freed.  Do not repost the
1508 		 *  request in this case.
1509 		 */
1510 		return;
1511 	}
1512 
1513 	if (cpl->status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
1514 	    cpl->status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED) {
1515 		/*
1516 		 *  SPDK will only send as many AERs as the device says it supports,
1517 		 *  so this status code indicates an out-of-spec device.  Do not repost
1518 		 *  the request in this case.
1519 		 */
1520 		SPDK_ERRLOG("Controller appears out-of-spec for asynchronous event request\n"
1521 			    "handling.  Do not repost this AER.\n");
1522 		return;
1523 	}
1524 
1525 	event.raw = cpl->cdw0;
1526 	if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
1527 	    (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED)) {
1528 		rc = nvme_ctrlr_identify_active_ns(ctrlr);
1529 		if (rc) {
1530 			return;
1531 		}
1532 		nvme_ctrlr_update_namespaces(ctrlr);
1533 	}
1534 
1535 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
1536 	if (active_proc && active_proc->aer_cb_fn) {
1537 		active_proc->aer_cb_fn(active_proc->aer_cb_arg, cpl);
1538 	}
1539 
1540 	/* If the ctrlr has already been shut down, do not submit another AER. */
1541 	if (ctrlr->is_shutdown) {
1542 		return;
1543 	}
1544 
1545 	/*
1546 	 * Repost another asynchronous event request to replace the one
1547 	 *  that just completed.
1548 	 */
1549 	if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) {
1550 		/*
1551 		 * We can't do anything to recover from a failure here,
1552 		 * so just print a warning message and leave the AER unsubmitted.
1553 		 */
1554 		SPDK_ERRLOG("resubmitting AER failed!\n");
1555 	}
1556 }
1557 
1558 static int
1559 nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
1560 				    struct nvme_async_event_request *aer)
1561 {
1562 	struct nvme_request *req;
1563 
1564 	aer->ctrlr = ctrlr;
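	/* Note (added): AERs carry no data buffer, so a null (no payload) admin request is sufficient. */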
1565 	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_async_event_cb, aer);
1566 	aer->req = req;
1567 	if (req == NULL) {
1568 		return -1;
1569 	}
1570 
1571 	req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1572 	return nvme_ctrlr_submit_admin_request(ctrlr, req);
1573 }
1574 
1575 static void
1576 nvme_ctrlr_configure_aer_done(void *arg, const struct spdk_nvme_cpl *cpl)
1577 {
1578 	struct nvme_async_event_request		*aer;
1579 	int					rc;
1580 	uint32_t				i;
1581 	struct spdk_nvme_ctrlr *ctrlr =	(struct spdk_nvme_ctrlr *)arg;
1582 
1583 	if (spdk_nvme_cpl_is_error(cpl)) {
1584 		SPDK_NOTICELOG("nvme_ctrlr_configure_aer failed!\n");
1585 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
1586 				     ctrlr->opts.admin_timeout_ms);
1587 		return;
1588 	}
1589 
1590 	/* aerl is a zero-based value, so we need to add 1 here. */
1591 	ctrlr->num_aers = spdk_min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));
1592 
1593 	for (i = 0; i < ctrlr->num_aers; i++) {
1594 		aer = &ctrlr->aer[i];
1595 		rc = nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
1596 		if (rc) {
1597 			SPDK_ERRLOG("nvme_ctrlr_construct_and_submit_aer failed!\n");
1598 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1599 			return;
1600 		}
1601 	}
1602 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
1603 			     ctrlr->opts.admin_timeout_ms);
1604 }
1605 
1606 static int
1607 nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
1608 {
1609 	union spdk_nvme_feat_async_event_configuration	config;
1610 	int						rc;
1611 
1612 	config.raw = 0;
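	/*
	 * Note (added): all critical warning (SMART) asynchronous events are enabled by
	 * default; the notice-type events below are only enabled when the reported NVMe
	 * version and controller capabilities indicate support.
	 */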
1613 	config.bits.crit_warn.bits.available_spare = 1;
1614 	config.bits.crit_warn.bits.temperature = 1;
1615 	config.bits.crit_warn.bits.device_reliability = 1;
1616 	config.bits.crit_warn.bits.read_only = 1;
1617 	config.bits.crit_warn.bits.volatile_memory_backup = 1;
1618 
1619 	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 2, 0)) {
1620 		if (ctrlr->cdata.oaes.ns_attribute_notices) {
1621 			config.bits.ns_attr_notice = 1;
1622 		}
1623 		if (ctrlr->cdata.oaes.fw_activation_notices) {
1624 			config.bits.fw_activation_notice = 1;
1625 		}
1626 	}
1627 	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 3, 0) && ctrlr->cdata.lpa.telemetry) {
1628 		config.bits.telemetry_log_notice = 1;
1629 	}
1630 
1631 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
1632 			     ctrlr->opts.admin_timeout_ms);
1633 
1634 	rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, config,
1635 			nvme_ctrlr_configure_aer_done,
1636 			ctrlr);
1637 	if (rc != 0) {
1638 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1639 		return rc;
1640 	}
1641 
1642 	return 0;
1643 }
1644 
1645 struct spdk_nvme_ctrlr_process *
1646 spdk_nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr, pid_t pid)
1647 {
1648 	struct spdk_nvme_ctrlr_process	*active_proc;
1649 
1650 	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
1651 		if (active_proc->pid == pid) {
1652 			return active_proc;
1653 		}
1654 	}
1655 
1656 	return NULL;
1657 }
1658 
1659 struct spdk_nvme_ctrlr_process *
1660 spdk_nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr)
1661 {
1662 	return spdk_nvme_ctrlr_get_process(ctrlr, getpid());
1663 }
1664 
1665 /**
1666  * This function will be called when a process is using the controller.
1667  *  1. For the primary process, it is called when constructing the controller.
1668  *  2. For a secondary process, it is called when probing the controller.
1669  * Note: it checks whether the process has already been added before adding it again.
1670  */
1671 int
1672 nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
1673 {
1674 	struct spdk_nvme_ctrlr_process	*ctrlr_proc;
1675 	pid_t				pid = getpid();
1676 
1677 	/* Check whether the process is already added or not */
1678 	if (spdk_nvme_ctrlr_get_process(ctrlr, pid)) {
1679 		return 0;
1680 	}
1681 
1682 	/* Initialize the per process properties for this ctrlr */
1683 	ctrlr_proc = spdk_zmalloc(sizeof(struct spdk_nvme_ctrlr_process),
1684 				  64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
1685 	if (ctrlr_proc == NULL) {
1686 		SPDK_ERRLOG("failed to allocate memory to track the process props\n");
1687 
1688 		return -1;
1689 	}
1690 
1691 	ctrlr_proc->is_primary = spdk_process_is_primary();
1692 	ctrlr_proc->pid = pid;
1693 	STAILQ_INIT(&ctrlr_proc->active_reqs);
1694 	ctrlr_proc->devhandle = devhandle;
1695 	ctrlr_proc->ref = 0;
1696 	TAILQ_INIT(&ctrlr_proc->allocated_io_qpairs);
1697 
1698 	TAILQ_INSERT_TAIL(&ctrlr->active_procs, ctrlr_proc, tailq);
1699 
1700 	return 0;
1701 }
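/*
 * Illustrative sketch (application side, simplified): a secondary SPDK process
 * reaches the path above simply by probing the controller again from its own
 * process; probe_cb and attach_cb are hypothetical application callbacks.
 *
 *	if (!spdk_process_is_primary()) {
 *		spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL);
 *	}
 */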
1702 
1703 /**
1704  * This function is called when a process detaches from the controller.
1705  * Note: the ctrlr_lock must be held when calling this function.
1706  */
1707 static void
1708 nvme_ctrlr_remove_process(struct spdk_nvme_ctrlr *ctrlr,
1709 			  struct spdk_nvme_ctrlr_process *proc)
1710 {
1711 	struct spdk_nvme_qpair	*qpair, *tmp_qpair;
1712 
1713 	assert(STAILQ_EMPTY(&proc->active_reqs));
1714 
1715 	TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
1716 		spdk_nvme_ctrlr_free_io_qpair(qpair);
1717 	}
1718 
1719 	TAILQ_REMOVE(&ctrlr->active_procs, proc, tailq);
1720 
1721 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
1722 		spdk_pci_device_detach(proc->devhandle);
1723 	}
1724 
1725 	spdk_dma_free(proc);
1726 }
1727 
1728 /**
1729  * This function is called when a process has exited unexpectedly, in order
1730  *  to free any incomplete nvme requests, allocated I/O qpairs and other
1731  *  allocated memory.
1732  * Note: the ctrlr_lock must be held when calling this function.
1733  */
1734 static void
1735 nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc)
1736 {
1737 	struct nvme_request	*req, *tmp_req;
1738 	struct spdk_nvme_qpair	*qpair, *tmp_qpair;
1739 
1740 	STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) {
1741 		STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq);
1742 
1743 		assert(req->pid == proc->pid);
1744 
1745 		nvme_free_request(req);
1746 	}
1747 
1748 	TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
1749 		TAILQ_REMOVE(&proc->allocated_io_qpairs, qpair, per_process_tailq);
1750 
1751 		/*
1752 		 * The process may have been killed while some qpairs were in their
1753 		 *  completion context.  Clear that flag here to allow these IO
1754 		 *  qpairs to be deleted.
1755 		 */
1756 		qpair->in_completion_context = 0;
1757 
1758 		qpair->no_deletion_notification_needed = 1;
1759 
1760 		spdk_nvme_ctrlr_free_io_qpair(qpair);
1761 	}
1762 
1763 	spdk_dma_free(proc);
1764 }
1765 
1766 /**
1767  * This function is called while the controller is being destructed.
1768  *  1. There are no more outstanding admin requests on this controller.
1769  *  2. Any resources still held by processes that have gone away are cleaned up.
1770  */
1771 void
1772 nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
1773 {
1774 	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;
1775 
1776 	/* Free each process's properties and make sure there are no pending admin I/Os. */
1777 	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
1778 		TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
1779 
1780 		assert(STAILQ_EMPTY(&active_proc->active_reqs));
1781 
1782 		spdk_free(active_proc);
1783 	}
1784 }
1785 
1786 /**
1787  * This function is called whenever another process attaches to or detaches
1788  *  from the controller, in order to clean up after any processes that
1789  *  terminated unexpectedly.
1790  * Note: the ctrlr_lock must be held when calling this function.
1791  */
1792 static int
1793 nvme_ctrlr_remove_inactive_proc(struct spdk_nvme_ctrlr *ctrlr)
1794 {
1795 	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;
1796 	int				active_proc_count = 0;
1797 
1798 	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
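		/* kill(pid, 0) delivers no signal but still performs the existence
		 * check, so ESRCH from it means the process is gone. */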
1799 		if ((kill(active_proc->pid, 0) == -1) && (errno == ESRCH)) {
1800 			SPDK_ERRLOG("process %d terminated unexpectedly\n", active_proc->pid);
1801 
1802 			TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
1803 
1804 			nvme_ctrlr_cleanup_process(active_proc);
1805 		} else {
1806 			active_proc_count++;
1807 		}
1808 	}
1809 
1810 	return active_proc_count;
1811 }
1812 
1813 void
1814 nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
1815 {
1816 	struct spdk_nvme_ctrlr_process	*active_proc;
1817 
1818 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1819 
1820 	nvme_ctrlr_remove_inactive_proc(ctrlr);
1821 
1822 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
1823 	if (active_proc) {
1824 		active_proc->ref++;
1825 	}
1826 
1827 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1828 }
1829 
1830 void
1831 nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
1832 {
1833 	struct spdk_nvme_ctrlr_process	*active_proc;
1834 	int				proc_count;
1835 
1836 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1837 
1838 	proc_count = nvme_ctrlr_remove_inactive_proc(ctrlr);
1839 
1840 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
1841 	if (active_proc) {
1842 		active_proc->ref--;
1843 		assert(active_proc->ref >= 0);
1844 
1845 		/*
1846 		 * The last active process will be removed at the end of
1847 		 * the destruction of the controller.
1848 		 */
1849 		if (active_proc->ref == 0 && proc_count != 1) {
1850 			nvme_ctrlr_remove_process(ctrlr, active_proc);
1851 		}
1852 	}
1853 
1854 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1855 }
1856 
1857 int
1858 nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
1859 {
1860 	struct spdk_nvme_ctrlr_process	*active_proc;
1861 	int				ref = 0;
1862 
1863 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1864 
1865 	nvme_ctrlr_remove_inactive_proc(ctrlr);
1866 
1867 	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
1868 		ref += active_proc->ref;
1869 	}
1870 
1871 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1872 
1873 	return ref;
1874 }
1875 
1876 /**
1877  *  Get the PCI device handle which is only visible to its associated process.
1878  */
1879 struct spdk_pci_device *
1880 nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr)
1881 {
1882 	struct spdk_nvme_ctrlr_process	*active_proc;
1883 	struct spdk_pci_device		*devhandle = NULL;
1884 
1885 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1886 
1887 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
1888 	if (active_proc) {
1889 		devhandle = active_proc->devhandle;
1890 	}
1891 
1892 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1893 
1894 	return devhandle;
1895 }
1896 
1897 static void
1898 nvme_ctrlr_enable_admin_queue(struct spdk_nvme_ctrlr *ctrlr)
1899 {
1900 	nvme_transport_qpair_reset(ctrlr->adminq);
1901 	nvme_qpair_enable(ctrlr->adminq);
1902 }
1903 
1904 /**
1905  * This function will be called repeatedly during initialization until the controller is ready.
1906  */
1907 int
1908 nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
1909 {
1910 	union spdk_nvme_cc_register cc;
1911 	union spdk_nvme_csts_register csts;
1912 	uint32_t ready_timeout_in_ms;
1913 	int rc = 0;
1914 
1915 	/*
1916 	 * We may need to avoid accessing any register on the target controller
1917 	 * for a while (see the delays set below).  Return early without touching
1918 	 * the state machine.  The explicit sleep_timeout_tsc > 0 check is needed for the unit tests.
1919 	 */
1920 	if ((ctrlr->sleep_timeout_tsc > 0) &&
1921 	    (spdk_get_ticks() <= ctrlr->sleep_timeout_tsc)) {
1922 		return 0;
1923 	}
1924 	ctrlr->sleep_timeout_tsc = 0;
1925 
1926 	if (nvme_ctrlr_get_cc(ctrlr, &cc) ||
1927 	    nvme_ctrlr_get_csts(ctrlr, &csts)) {
1928 		if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
1929 			/* While a device is resetting, it may be unable to service MMIO reads
1930 			 * temporarily. Allow for this case.
1931 			 */
1932 			SPDK_ERRLOG("Get registers failed while waiting for CSTS.RDY == 0\n");
1933 			goto init_timeout;
1934 		}
1935 		SPDK_ERRLOG("Failed to read CC and CSTS in state %d\n", ctrlr->state);
1936 		nvme_ctrlr_fail(ctrlr, false);
1937 		return -EIO;
1938 	}
1939 
1940 	ready_timeout_in_ms = 500 * ctrlr->cap.bits.to;
1941 
1942 	/*
1943 	 * Check if the current initialization step is done or has timed out.
1944 	 */
1945 	switch (ctrlr->state) {
1946 	case NVME_CTRLR_STATE_INIT_DELAY:
1947 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, ready_timeout_in_ms);
1948 		/*
1949 		 * Controller may need some delay before it's enabled.
1950 		 *
1951 		 * This is a workaround for an issue where the PCIe-attached NVMe controller
1952 		 * is not ready after VFIO reset. We delay the initialization rather than the
1953 		 * enabling itself, because this is required only for the very first enabling
1954 		 * - directly after a VFIO reset.
1955 		 *
1956 		 * TODO: Figure out what is actually going wrong.
1957 		 */
1958 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Adding 2 second delay before initializing the controller\n");
1959 		ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2000 * spdk_get_ticks_hz() / 1000);
1960 		break;
1961 
1962 	case NVME_CTRLR_STATE_INIT:
1963 		/* Begin the hardware initialization by making sure the controller is disabled. */
1964 		if (cc.bits.en) {
1965 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1\n");
1966 			/*
1967 			 * Controller is currently enabled. We need to disable it to cause a reset.
1968 			 *
1969 			 * If CC.EN = 1 && CSTS.RDY = 0, the controller is in the process of becoming ready.
1970 			 *  Wait for the ready bit to be 1 before disabling the controller.
1971 			 */
1972 			if (csts.bits.rdy == 0) {
1973 				SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 0 - waiting for reset to complete\n");
1974 				nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1, ready_timeout_in_ms);
1975 				return 0;
1976 			}
1977 
1978 			/* CC.EN = 1 && CSTS.RDY == 1, so we can immediately disable the controller. */
1979 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 0\n");
1980 			cc.bits.en = 0;
1981 			if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
1982 				SPDK_ERRLOG("set_cc() failed\n");
1983 				nvme_ctrlr_fail(ctrlr, false);
1984 				return -EIO;
1985 			}
1986 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
1987 
1988 			/*
1989 			 * Wait 2.5 seconds before accessing PCI registers.
1990 			 * We don't use sleep() here so that other controllers' initialization is not blocked.
1991 			 */
1992 			if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) {
1993 				SPDK_DEBUGLOG(SPDK_LOG_NVME, "Applying quirk: delay 2.5 seconds before reading registers\n");
1994 				ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2500 * spdk_get_ticks_hz() / 1000);
1995 			}
1996 			return 0;
1997 		} else {
1998 			if (csts.bits.rdy == 1) {
1999 				SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 0 && CSTS.RDY = 1 - waiting for shutdown to complete\n");
2000 			}
2001 
2002 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
2003 			return 0;
2004 		}
2005 		break;
2006 
2007 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
2008 		if (csts.bits.rdy == 1) {
2009 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 1 - disabling controller\n");
2010 			/* CC.EN = 1 && CSTS.RDY = 1, so we can set CC.EN = 0 now. */
2011 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 0\n");
2012 			cc.bits.en = 0;
2013 			if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
2014 				SPDK_ERRLOG("set_cc() failed\n");
2015 				nvme_ctrlr_fail(ctrlr, false);
2016 				return -EIO;
2017 			}
2018 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
2019 			return 0;
2020 		}
2021 		break;
2022 
2023 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
2024 		if (csts.bits.rdy == 0) {
2025 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 0 && CSTS.RDY = 0\n");
2026 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE, ready_timeout_in_ms);
2027 			/*
2028 			 * Delay 100us before setting CC.EN = 1.  Some NVMe SSDs miss CC.EN getting
2029 			 *  set to 1 if it is too soon after CSTS.RDY is reported as 0.
2030 			 */
2031 			spdk_delay_us(100);
2032 			return 0;
2033 		}
2034 		break;
2035 
2036 	case NVME_CTRLR_STATE_ENABLE:
2037 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 1\n");
2038 		rc = nvme_ctrlr_enable(ctrlr);
2039 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1, ready_timeout_in_ms);
2040 		return rc;
2041 
2042 	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
2043 		if (csts.bits.rdy == 1) {
2044 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 1 - controller is ready\n");
2045 			/*
2046 			 * The controller has been enabled.
2047 			 *  Perform the rest of initialization serially.
2048 			 */
2049 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE,
2050 					     ctrlr->opts.admin_timeout_ms);
2051 			return 0;
2052 		}
2053 		break;
2054 
2055 	case NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE:
2056 		nvme_ctrlr_enable_admin_queue(ctrlr);
2057 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY,
2058 				     ctrlr->opts.admin_timeout_ms);
2059 		break;
2060 
2061 	case NVME_CTRLR_STATE_IDENTIFY:
2062 		rc = nvme_ctrlr_identify(ctrlr);
2063 		break;
2064 
2065 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
2066 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2067 		break;
2068 
2069 	case NVME_CTRLR_STATE_SET_NUM_QUEUES:
2070 		rc = nvme_ctrlr_set_num_queues(ctrlr);
2071 		break;
2072 
2073 	case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
2074 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2075 		break;
2076 
2077 	case NVME_CTRLR_STATE_GET_NUM_QUEUES:
2078 		rc = nvme_ctrlr_get_num_queues(ctrlr);
2079 		break;
2080 
2081 	case NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES:
2082 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2083 		break;
2084 
2085 	case NVME_CTRLR_STATE_CONSTRUCT_NS:
2086 		rc = nvme_ctrlr_construct_namespaces(ctrlr);
2087 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
2088 				     ctrlr->opts.admin_timeout_ms);
2089 		break;
2090 
2091 	case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
2092 		rc = nvme_ctrlr_identify_active_ns(ctrlr);
2093 		if (rc < 0) {
2094 			nvme_ctrlr_destruct_namespaces(ctrlr);
2095 		}
2096 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS,
2097 				     ctrlr->opts.admin_timeout_ms);
2098 		break;
2099 
2100 	case NVME_CTRLR_STATE_IDENTIFY_NS:
2101 		rc = nvme_ctrlr_identify_namespaces(ctrlr);
2102 		break;
2103 
2104 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
2105 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2106 		break;
2107 
2108 	case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
2109 		rc = nvme_ctrlr_identify_id_desc_namespaces(ctrlr);
2110 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
2111 				     ctrlr->opts.admin_timeout_ms);
2112 		break;
2113 
2114 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
2115 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2116 		break;
2117 
2118 	case NVME_CTRLR_STATE_CONFIGURE_AER:
2119 		rc = nvme_ctrlr_configure_aer(ctrlr);
2120 		break;
2121 
2122 	case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
2123 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2124 		break;
2125 
2126 	case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
2127 		rc = nvme_ctrlr_set_supported_log_pages(ctrlr);
2128 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
2129 				     ctrlr->opts.admin_timeout_ms);
2130 		break;
2131 
2132 	case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
2133 		nvme_ctrlr_set_supported_features(ctrlr);
2134 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
2135 				     ctrlr->opts.admin_timeout_ms);
2136 		break;
2137 
2138 	case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
2139 		rc = nvme_ctrlr_set_doorbell_buffer_config(ctrlr);
2140 		break;
2141 
2142 	case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
2143 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2144 		break;
2145 
2146 	case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
2147 		rc = nvme_ctrlr_set_keep_alive_timeout(ctrlr);
2148 		break;
2149 
2150 	case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
2151 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2152 		break;
2153 
2154 	case NVME_CTRLR_STATE_SET_HOST_ID:
2155 		rc = nvme_ctrlr_set_host_id(ctrlr);
2156 		break;
2157 
2158 	case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
2159 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2160 		break;
2161 
2162 	case NVME_CTRLR_STATE_READY:
2163 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Ctrlr already in ready state\n");
2164 		return 0;
2165 
2166 	case NVME_CTRLR_STATE_ERROR:
2167 		SPDK_ERRLOG("Ctrlr %s is in error state\n", ctrlr->trid.traddr);
2168 		return -1;
2169 
2170 	default:
2171 		assert(0);
2172 		nvme_ctrlr_fail(ctrlr, false);
2173 		return -1;
2174 	}
2175 
2176 init_timeout:
2177 	if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE &&
2178 	    spdk_get_ticks() > ctrlr->state_timeout_tsc) {
2179 		SPDK_ERRLOG("Initialization timed out in state %d\n", ctrlr->state);
2180 		nvme_ctrlr_fail(ctrlr, false);
2181 		return -1;
2182 	}
2183 
2184 	return rc;
2185 }
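/*
 * Illustrative sketch (simplified; the real loop lives in the common probe and
 * attach code): the caller drives the state machine above by invoking this
 * function repeatedly until the controller reaches the READY state or fails.
 *
 *	do {
 *		rc = nvme_ctrlr_process_init(ctrlr);
 *	} while (rc == 0 && ctrlr->state != NVME_CTRLR_STATE_READY);
 */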
2186 
2187 int
2188 nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx)
2189 {
2190 	pthread_mutexattr_t attr;
2191 	int rc = 0;
2192 
2193 	if (pthread_mutexattr_init(&attr)) {
2194 		return -1;
2195 	}
2196 	if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) ||
2197 #ifndef __FreeBSD__
2198 	    pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST) ||
2199 	    pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
2200 #endif
2201 	    pthread_mutex_init(mtx, &attr)) {
2202 		rc = -1;
2203 	}
2204 	pthread_mutexattr_destroy(&attr);
2205 	return rc;
2206 }
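/*
 * Illustrative sketch of how callers are expected to take this mutex: because
 * it is robust and process-shared (except on FreeBSD), a locker can detect that
 * a previous owner died and mark the mutex consistent again.  The
 * nvme_robust_mutex_lock() helper used throughout this file follows this
 * standard POSIX pattern.
 *
 *	rc = pthread_mutex_lock(mtx);
 *	if (rc == EOWNERDEAD) {
 *		// previous owner died while holding the lock; recover shared state
 *		pthread_mutex_consistent(mtx);
 *	}
 */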
2207 
2208 int
2209 nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
2210 {
2211 	int rc;
2212 
2213 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
2214 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT_DELAY, NVME_TIMEOUT_INFINITE);
2215 	} else {
2216 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
2217 	}
2218 
2219 	ctrlr->flags = 0;
2220 	ctrlr->free_io_qids = NULL;
2221 	ctrlr->is_resetting = false;
2222 	ctrlr->is_failed = false;
2223 	ctrlr->is_shutdown = false;
2224 
2225 	TAILQ_INIT(&ctrlr->active_io_qpairs);
2226 	STAILQ_INIT(&ctrlr->queued_aborts);
2227 	ctrlr->outstanding_aborts = 0;
2228 
2229 	rc = nvme_robust_mutex_init_recursive_shared(&ctrlr->ctrlr_lock);
2230 	if (rc != 0) {
2231 		return rc;
2232 	}
2233 
2234 	TAILQ_INIT(&ctrlr->active_procs);
2235 
2236 	return rc;
2237 }
2238 
2239 /* This function should be called once at ctrlr initialization to set up constant properties. */
2240 void
2241 nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_register *cap,
2242 		    const union spdk_nvme_vs_register *vs)
2243 {
2244 	ctrlr->cap = *cap;
2245 	ctrlr->vs = *vs;
2246 
2247 	ctrlr->min_page_size = 1u << (12 + ctrlr->cap.bits.mpsmin);
2248 
2249 	/* For now, always select page_size == min_page_size. */
2250 	ctrlr->page_size = ctrlr->min_page_size;
2251 
2252 	ctrlr->opts.io_queue_size = spdk_max(ctrlr->opts.io_queue_size, SPDK_NVME_IO_QUEUE_MIN_ENTRIES);
2253 	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, MAX_IO_QUEUE_ENTRIES);
2254 	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
2255 
2256 	ctrlr->opts.io_queue_requests = spdk_max(ctrlr->opts.io_queue_requests, ctrlr->opts.io_queue_size);
2257 }
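/*
 * Worked example (illustrative): CAP.MPSMIN is an exponent relative to 4 KiB,
 * so MPSMIN = 0 gives min_page_size = 1 << (12 + 0) = 4096 bytes and
 * MPSMIN = 1 gives 8192 bytes.  CAP.MQES is 0-based, so MQES = 1023 allows
 * queues of up to 1024 entries, which is why io_queue_size is clamped to
 * mqes + 1 above.
 */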
2258 
2259 void
2260 nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr)
2261 {
2262 	pthread_mutex_destroy(&ctrlr->ctrlr_lock);
2263 }
2264 
2265 void
2266 nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
2267 {
2268 	struct spdk_nvme_qpair *qpair, *tmp;
2269 
2270 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Prepare to destruct SSD: %s\n", ctrlr->trid.traddr);
2271 	TAILQ_FOREACH_SAFE(qpair, &ctrlr->active_io_qpairs, tailq, tmp) {
2272 		spdk_nvme_ctrlr_free_io_qpair(qpair);
2273 	}
2274 
2275 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
2276 
2277 	nvme_ctrlr_shutdown(ctrlr);
2278 
2279 	nvme_ctrlr_destruct_namespaces(ctrlr);
2280 
2281 	spdk_bit_array_free(&ctrlr->free_io_qids);
2282 
2283 	nvme_transport_ctrlr_destruct(ctrlr);
2284 }
2285 
2286 int
2287 nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
2288 				struct nvme_request *req)
2289 {
2290 	return nvme_qpair_submit_request(ctrlr->adminq, req);
2291 }
2292 
2293 static void
2294 nvme_keep_alive_completion(void *cb_ctx, const struct spdk_nvme_cpl *cpl)
2295 {
2296 	/* Do nothing */
2297 }
2298 
2299 /*
2300  * Check if we need to send a Keep Alive command.
2301  * Caller must hold ctrlr->ctrlr_lock.
2302  */
2303 static void
2304 nvme_ctrlr_keep_alive(struct spdk_nvme_ctrlr *ctrlr)
2305 {
2306 	uint64_t now;
2307 	struct nvme_request *req;
2308 	struct spdk_nvme_cmd *cmd;
2309 	int rc;
2310 
2311 	now = spdk_get_ticks();
2312 	if (now < ctrlr->next_keep_alive_tick) {
2313 		return;
2314 	}
2315 
2316 	req = nvme_allocate_request_null(ctrlr->adminq, nvme_keep_alive_completion, NULL);
2317 	if (req == NULL) {
2318 		return;
2319 	}
2320 
2321 	cmd = &req->cmd;
2322 	cmd->opc = SPDK_NVME_OPC_KEEP_ALIVE;
2323 
2324 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
2325 	if (rc != 0) {
2326 		SPDK_ERRLOG("Submitting Keep Alive failed\n");
2327 	}
2328 
2329 	ctrlr->next_keep_alive_tick = now + ctrlr->keep_alive_interval_ticks;
2330 }
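/*
 * Illustrative sketch (application side): keep alive is armed by setting
 * opts.keep_alive_timeout_ms before attaching, and the command above is only
 * sent when the application polls the admin queue, e.g. from its main loop
 * (running is a hypothetical application flag):
 *
 *	while (running) {
 *		spdk_nvme_ctrlr_process_admin_completions(ctrlr);
 *		// ... other work ...
 *	}
 */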
2331 
2332 int32_t
2333 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
2334 {
2335 	int32_t num_completions;
2336 
2337 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2338 	if (ctrlr->keep_alive_interval_ticks) {
2339 		nvme_ctrlr_keep_alive(ctrlr);
2340 	}
2341 	num_completions = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2342 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2343 
2344 	return num_completions;
2345 }
2346 
2347 const struct spdk_nvme_ctrlr_data *
2348 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
2349 {
2350 	return &ctrlr->cdata;
2351 }
2352 
2353 union spdk_nvme_csts_register spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
2354 {
2355 	union spdk_nvme_csts_register csts;
2356 
2357 	if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
2358 		csts.raw = 0xFFFFFFFFu;
2359 	}
2360 	return csts;
2361 }
2362 
2363 union spdk_nvme_cap_register spdk_nvme_ctrlr_get_regs_cap(struct spdk_nvme_ctrlr *ctrlr)
2364 {
2365 	return ctrlr->cap;
2366 }
2367 
2368 union spdk_nvme_vs_register spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
2369 {
2370 	return ctrlr->vs;
2371 }
2372 
2373 union spdk_nvme_cmbsz_register spdk_nvme_ctrlr_get_regs_cmbsz(struct spdk_nvme_ctrlr *ctrlr)
2374 {
2375 	union spdk_nvme_cmbsz_register cmbsz;
2376 
2377 	if (nvme_ctrlr_get_cmbsz(ctrlr, &cmbsz)) {
2378 		cmbsz.raw = 0;
2379 	}
2380 
2381 	return cmbsz;
2382 }
2383 
2384 uint32_t
2385 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
2386 {
2387 	return ctrlr->num_ns;
2388 }
2389 
2390 static int32_t
2391 spdk_nvme_ctrlr_active_ns_idx(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2392 {
2393 	int32_t result = -1;
2394 
2395 	if (ctrlr->active_ns_list == NULL || nsid == 0 || nsid > ctrlr->num_ns) {
2396 		return result;
2397 	}
2398 
2399 	int32_t lower = 0;
2400 	int32_t upper = ctrlr->num_ns - 1;
2401 	int32_t mid;
2402 
2403 	while (lower <= upper) {
2404 		mid = lower + (upper - lower) / 2;
2405 		if (ctrlr->active_ns_list[mid] == nsid) {
2406 			result = mid;
2407 			break;
2408 		} else {
2409 			if (ctrlr->active_ns_list[mid] != 0 && ctrlr->active_ns_list[mid] < nsid) {
2410 				lower = mid + 1;
2411 			} else {
2412 				upper = mid - 1;
2413 			}
2414 
2415 		}
2416 	}
2417 
2418 	return result;
2419 }
2420 
2421 bool
2422 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2423 {
2424 	return spdk_nvme_ctrlr_active_ns_idx(ctrlr, nsid) != -1;
2425 }
2426 
2427 uint32_t
2428 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
2429 {
2430 	return ctrlr->active_ns_list ? ctrlr->active_ns_list[0] : 0;
2431 }
2432 
2433 uint32_t
2434 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
2435 {
2436 	int32_t nsid_idx = spdk_nvme_ctrlr_active_ns_idx(ctrlr, prev_nsid);
2437 	if (ctrlr->active_ns_list && nsid_idx >= 0 && (uint32_t)nsid_idx < ctrlr->num_ns - 1) {
2438 		return ctrlr->active_ns_list[nsid_idx + 1];
2439 	}
2440 	return 0;
2441 }
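/*
 * Typical iteration over the active namespaces using the two helpers above
 * (illustrative sketch):
 *
 *	for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
 *	     nsid != 0;
 *	     nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
 *		struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
 *		// ... use ns ...
 *	}
 */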
2442 
2443 struct spdk_nvme_ns *
2444 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2445 {
2446 	if (nsid < 1 || nsid > ctrlr->num_ns) {
2447 		return NULL;
2448 	}
2449 
2450 	return &ctrlr->ns[nsid - 1];
2451 }
2452 
2453 struct spdk_pci_device *
2454 spdk_nvme_ctrlr_get_pci_device(struct spdk_nvme_ctrlr *ctrlr)
2455 {
2456 	if (ctrlr == NULL) {
2457 		return NULL;
2458 	}
2459 
2460 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
2461 		return NULL;
2462 	}
2463 
2464 	return nvme_ctrlr_proc_get_devhandle(ctrlr);
2465 }
2466 
2467 uint32_t
2468 spdk_nvme_ctrlr_get_max_xfer_size(const struct spdk_nvme_ctrlr *ctrlr)
2469 {
2470 	return ctrlr->max_xfer_size;
2471 }
2472 
2473 void
2474 spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
2475 				      spdk_nvme_aer_cb aer_cb_fn,
2476 				      void *aer_cb_arg)
2477 {
2478 	struct spdk_nvme_ctrlr_process *active_proc;
2479 
2480 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2481 
2482 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
2483 	if (active_proc) {
2484 		active_proc->aer_cb_fn = aer_cb_fn;
2485 		active_proc->aer_cb_arg = aer_cb_arg;
2486 	}
2487 
2488 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2489 }
2490 
2491 void
2492 spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,
2493 		uint64_t timeout_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)
2494 {
2495 	struct spdk_nvme_ctrlr_process	*active_proc;
2496 
2497 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2498 
2499 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
2500 	if (active_proc) {
2501 		active_proc->timeout_ticks = timeout_us * spdk_get_ticks_hz() / 1000000ULL;
2502 		active_proc->timeout_cb_fn = cb_fn;
2503 		active_proc->timeout_cb_arg = cb_arg;
2504 	}
2505 
2506 	ctrlr->timeout_enabled = true;
2507 
2508 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2509 }
2510 
2511 bool
2512 spdk_nvme_ctrlr_is_log_page_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page)
2513 {
2514 	/* No bounds check necessary, since log_page is uint8_t and log_page_supported has 256 entries */
2515 	SPDK_STATIC_ASSERT(sizeof(ctrlr->log_page_supported) == 256, "log_page_supported size mismatch");
2516 	return ctrlr->log_page_supported[log_page];
2517 }
2518 
2519 bool
2520 spdk_nvme_ctrlr_is_feature_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature_code)
2521 {
2522 	/* No bounds check necessary, since feature_code is uint8_t and feature_supported has 256 entries */
2523 	SPDK_STATIC_ASSERT(sizeof(ctrlr->feature_supported) == 256, "feature_supported size mismatch");
2524 	return ctrlr->feature_supported[feature_code];
2525 }
2526 
2527 int
2528 spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
2529 			  struct spdk_nvme_ctrlr_list *payload)
2530 {
2531 	struct nvme_completion_poll_status	status;
2532 	int					res;
2533 	struct spdk_nvme_ns			*ns;
2534 
2535 	res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload,
2536 				       nvme_completion_poll_cb, &status);
2537 	if (res) {
2538 		return res;
2539 	}
2540 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2541 		SPDK_ERRLOG("spdk_nvme_ctrlr_attach_ns failed!\n");
2542 		return -ENXIO;
2543 	}
2544 
2545 	res = nvme_ctrlr_identify_active_ns(ctrlr);
2546 	if (res) {
2547 		return res;
2548 	}
2549 
2550 	ns = &ctrlr->ns[nsid - 1];
2551 	return nvme_ns_construct(ns, nsid, ctrlr);
2552 }
2553 
2554 int
2555 spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
2556 			  struct spdk_nvme_ctrlr_list *payload)
2557 {
2558 	struct nvme_completion_poll_status	status;
2559 	int					res;
2560 	struct spdk_nvme_ns			*ns;
2561 
2562 	res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload,
2563 				       nvme_completion_poll_cb, &status);
2564 	if (res) {
2565 		return res;
2566 	}
2567 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2568 		SPDK_ERRLOG("spdk_nvme_ctrlr_detach_ns failed!\n");
2569 		return -ENXIO;
2570 	}
2571 
2572 	res = nvme_ctrlr_identify_active_ns(ctrlr);
2573 	if (res) {
2574 		return res;
2575 	}
2576 
2577 	ns = &ctrlr->ns[nsid - 1];
2578 	/* The namespace is now inactive; tear down its local representation. */
2579 	nvme_ns_destruct(ns);
2580 
2581 	return 0;
2582 }
2583 
2584 uint32_t
2585 spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload)
2586 {
2587 	struct nvme_completion_poll_status	status;
2588 	int					res;
2589 	uint32_t				nsid;
2590 	struct spdk_nvme_ns			*ns;
2591 
2592 	res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, &status);
2593 	if (res) {
2594 		return 0;
2595 	}
2596 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2597 		SPDK_ERRLOG("spdk_nvme_ctrlr_create_ns failed!\n");
2598 		return 0;
2599 	}
2600 
2601 	nsid = status.cpl.cdw0;
2602 	ns = &ctrlr->ns[nsid - 1];
2603 	/* The new namespace is not attached yet, so construct it as inactive. */
2604 	res = nvme_ns_construct(ns, nsid, ctrlr);
2605 	if (res) {
2606 		return 0;
2607 	}
2608 
2609 	/* Return the namespace ID that was created */
2610 	return nsid;
2611 }
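/*
 * Illustrative sketch (application side, assuming the controller supports
 * namespace management): a newly created namespace stays inactive until it is
 * attached to a controller, typically the one that created it.
 *
 *	struct spdk_nvme_ctrlr_list ctrlr_list = {};
 *	uint32_t new_nsid;
 *
 *	new_nsid = spdk_nvme_ctrlr_create_ns(ctrlr, &ns_data);  // ns_data filled in by the caller
 *	ctrlr_list.ctrlr_count = 1;
 *	ctrlr_list.ctrlr_list[0] = spdk_nvme_ctrlr_get_data(ctrlr)->cntlid;
 *	spdk_nvme_ctrlr_attach_ns(ctrlr, new_nsid, &ctrlr_list);
 */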
2612 
2613 int
2614 spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2615 {
2616 	struct nvme_completion_poll_status	status;
2617 	int					res;
2618 	struct spdk_nvme_ns			*ns;
2619 
2620 	res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, &status);
2621 	if (res) {
2622 		return res;
2623 	}
2624 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2625 		SPDK_ERRLOG("spdk_nvme_ctrlr_delete_ns failed!\n");
2626 		return -ENXIO;
2627 	}
2628 
2629 	res = nvme_ctrlr_identify_active_ns(ctrlr);
2630 	if (res) {
2631 		return res;
2632 	}
2633 
2634 	ns = &ctrlr->ns[nsid - 1];
2635 	nvme_ns_destruct(ns);
2636 
2637 	return 0;
2638 }
2639 
2640 int
2641 spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
2642 		       struct spdk_nvme_format *format)
2643 {
2644 	struct nvme_completion_poll_status	status;
2645 	int					res;
2646 
2647 	res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb,
2648 				    &status);
2649 	if (res) {
2650 		return res;
2651 	}
2652 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2653 		SPDK_ERRLOG("spdk_nvme_ctrlr_format failed!\n");
2654 		return -ENXIO;
2655 	}
2656 
2657 	return spdk_nvme_ctrlr_reset(ctrlr);
2658 }
2659 
2660 int
2661 spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, uint32_t size,
2662 				int slot, enum spdk_nvme_fw_commit_action commit_action, struct spdk_nvme_status *completion_status)
2663 {
2664 	struct spdk_nvme_fw_commit		fw_commit;
2665 	struct nvme_completion_poll_status	status;
2666 	int					res;
2667 	unsigned int				size_remaining;
2668 	unsigned int				offset;
2669 	unsigned int				transfer;
2670 	void					*p;
2671 
2672 	if (!completion_status) {
2673 		return -EINVAL;
2674 	}
2675 	memset(completion_status, 0, sizeof(struct spdk_nvme_status));
2676 	if (size % 4) {
2677 		SPDK_ERRLOG("spdk_nvme_ctrlr_update_firmware invalid size!\n");
2678 		return -1;
2679 	}
2680 
2681 	/* Only SPDK_NVME_FW_COMMIT_REPLACE_IMG and
2682 	 * SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG are currently supported.
2683 	 */
2684 	if ((commit_action != SPDK_NVME_FW_COMMIT_REPLACE_IMG) &&
2685 	    (commit_action != SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG)) {
2686 		SPDK_ERRLOG("spdk_nvme_ctrlr_update_firmware invalid command!\n");
2687 		return -1;
2688 	}
2689 
2690 	/* Firmware download */
2691 	size_remaining = size;
2692 	offset = 0;
2693 	p = payload;
2694 
2695 	while (size_remaining > 0) {
2696 		transfer = spdk_min(size_remaining, ctrlr->min_page_size);
2697 
2698 		res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
2699 						       nvme_completion_poll_cb,
2700 						       &status);
2701 		if (res) {
2702 			return res;
2703 		}
2704 
2705 		if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2706 			SPDK_ERRLOG("spdk_nvme_ctrlr_fw_image_download failed!\n");
2707 			return -ENXIO;
2708 		}
2709 		p += transfer;
2710 		offset += transfer;
2711 		size_remaining -= transfer;
2712 	}
2713 
2714 	/* Firmware commit */
2715 	memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
2716 	fw_commit.fs = slot;
2717 	fw_commit.ca = commit_action;
2718 
2719 	res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb,
2720 				       &status);
2721 	if (res) {
2722 		return res;
2723 	}
2724 
2725 	res = spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock);
2726 
2727 	memcpy(completion_status, &status.cpl.status, sizeof(struct spdk_nvme_status));
2728 
2729 	if (res) {
2730 		if (status.cpl.status.sct != SPDK_NVME_SCT_COMMAND_SPECIFIC ||
2731 		    status.cpl.status.sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) {
2732 			if (status.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC  &&
2733 			    status.cpl.status.sc == SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET) {
2734 				SPDK_NOTICELOG("Firmware activation requires a conventional reset to be performed.\n");
2735 			} else {
2736 				SPDK_ERRLOG("nvme_ctrlr_cmd_fw_commit failed!\n");
2737 			}
2738 			return -ENXIO;
2739 		}
2740 	}
2741 
2742 	return spdk_nvme_ctrlr_reset(ctrlr);
2743 }
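/*
 * Illustrative usage sketch: the image size must be a multiple of 4 bytes and
 * the buffer should come from DMA-able memory (names below are hypothetical).
 *
 *	struct spdk_nvme_status fw_status;
 *	void *image = spdk_dma_zmalloc(image_size, 4096, NULL);
 *	// ... read the firmware file into image ...
 *	rc = spdk_nvme_ctrlr_update_firmware(ctrlr, image, image_size, slot,
 *					     SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG,
 *					     &fw_status);
 *	// On success the controller has already been reset and is ready for I/O again.
 */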
2744 
2745 void *
2746 spdk_nvme_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
2747 {
2748 	void *buf;
2749 
2750 	if (size == 0) {
2751 		return NULL;
2752 	}
2753 
2754 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2755 	buf = nvme_transport_ctrlr_alloc_cmb_io_buffer(ctrlr, size);
2756 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2757 
2758 	return buf;
2759 }
2760 
2761 void
2762 spdk_nvme_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
2763 {
2764 	if (buf && size) {
2765 		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2766 		nvme_transport_ctrlr_free_cmb_io_buffer(ctrlr, buf, size);
2767 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2768 	}
2769 }
2770 
2771 bool
2772 spdk_nvme_ctrlr_is_discovery(struct spdk_nvme_ctrlr *ctrlr)
2773 {
2774 	assert(ctrlr);
2775 
2776 	return !strncmp(ctrlr->trid.subnqn, SPDK_NVMF_DISCOVERY_NQN,
2777 			strlen(SPDK_NVMF_DISCOVERY_NQN));
2778 }
2779 
2780 int
2781 spdk_nvme_ctrlr_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
2782 				 uint16_t spsp, uint8_t nssf, void *payload, size_t size)
2783 {
2784 	struct nvme_completion_poll_status	status;
2785 	int					res;
2786 
2787 	res = nvme_ctrlr_cmd_security_receive(ctrlr, secp, spsp, nssf, payload, size,
2788 					      nvme_completion_poll_cb, &status);
2789 	if (res) {
2790 		return res;
2791 	}
2792 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2793 		SPDK_ERRLOG("spdk_nvme_ctrlr_security_receive failed!\n");
2794 		return -ENXIO;
2795 	}
2796 
2797 	return 0;
2798 }
2799 
2800 int
2801 spdk_nvme_ctrlr_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
2802 			      uint16_t spsp, uint8_t nssf, void *payload, size_t size)
2803 {
2804 	struct nvme_completion_poll_status	status;
2805 	int					res;
2806 
2807 	res = nvme_ctrlr_cmd_security_send(ctrlr, secp, spsp, nssf, payload, size, nvme_completion_poll_cb,
2808 					   &status);
2809 	if (res) {
2810 		return res;
2811 	}
2812 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2813 		SPDK_ERRLOG("spdk_nvme_ctrlr_security_send failed!\n");
2814 		return -ENXIO;
2815 	}
2816 
2817 	return 0;
2818 }
2819 
2820 uint64_t
2821 spdk_nvme_ctrlr_get_flags(struct spdk_nvme_ctrlr *ctrlr)
2822 {
2823 	return ctrlr->flags;
2824 }
2825