1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "nvme_internal.h"
37 #include "nvme_io_msg.h"
38 
39 #include "spdk/env.h"
40 #include "spdk/string.h"
41 
42 static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
43 		struct nvme_async_event_request *aer);
44 static int nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns);
45 static int nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns);
46 
47 static int
48 nvme_ctrlr_get_cc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc)
49 {
50 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
51 					      &cc->raw);
52 }
53 
54 static int
55 nvme_ctrlr_get_csts(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts)
56 {
57 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts.raw),
58 					      &csts->raw);
59 }
60 
61 int
62 nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
63 {
64 	return nvme_transport_ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
65 					      &cap->raw);
66 }
67 
68 int
69 nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs)
70 {
71 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, vs.raw),
72 					      &vs->raw);
73 }
74 
75 static int
76 nvme_ctrlr_set_cc(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cc_register *cc)
77 {
78 	return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
79 					      cc->raw);
80 }
81 
82 int
83 nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz)
84 {
85 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
86 					      &cmbsz->raw);
87 }
88 
89 volatile struct spdk_nvme_registers *
90 spdk_nvme_ctrlr_get_registers(struct spdk_nvme_ctrlr *ctrlr)
91 {
92 	return nvme_transport_ctrlr_get_registers(ctrlr);
93 }
94 
95 void
96 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
97 {
98 	char host_id_str[SPDK_UUID_STRING_LEN];
99 
100 	assert(opts);
101 
102 	memset(opts, 0, opts_size);
103 
104 #define FIELD_OK(field) \
105 	offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size
106 
107 	if (FIELD_OK(num_io_queues)) {
108 		opts->num_io_queues = DEFAULT_MAX_IO_QUEUES;
109 	}
110 
111 	if (FIELD_OK(use_cmb_sqs)) {
112 		opts->use_cmb_sqs = true;
113 	}
114 
115 	if (FIELD_OK(no_shn_notification)) {
116 		opts->no_shn_notification = false;
117 	}
118 
119 	if (FIELD_OK(arb_mechanism)) {
120 		opts->arb_mechanism = SPDK_NVME_CC_AMS_RR;
121 	}
122 
123 	if (FIELD_OK(keep_alive_timeout_ms)) {
124 		opts->keep_alive_timeout_ms = MIN_KEEP_ALIVE_TIMEOUT_IN_MS;
125 	}
126 
127 	if (FIELD_OK(transport_retry_count)) {
128 		opts->transport_retry_count = SPDK_NVME_DEFAULT_RETRY_COUNT;
129 	}
130 
131 	if (FIELD_OK(io_queue_size)) {
132 		opts->io_queue_size = DEFAULT_IO_QUEUE_SIZE;
133 	}
134 
135 	if (FIELD_OK(io_queue_requests)) {
136 		opts->io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
137 	}
138 
139 	if (FIELD_OK(host_id)) {
140 		memset(opts->host_id, 0, sizeof(opts->host_id));
141 	}
142 
143 	if (nvme_driver_init() == 0) {
144 		if (FIELD_OK(extended_host_id)) {
145 			memcpy(opts->extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
146 			       sizeof(opts->extended_host_id));
147 		}
148 
149 		if (FIELD_OK(hostnqn)) {
150 			spdk_uuid_fmt_lower(host_id_str, sizeof(host_id_str),
151 					    &g_spdk_nvme_driver->default_extended_host_id);
152 			snprintf(opts->hostnqn, sizeof(opts->hostnqn), "2014-08.org.nvmexpress:uuid:%s", host_id_str);
153 		}
154 	}
155 
156 	if (FIELD_OK(src_addr)) {
157 		memset(opts->src_addr, 0, sizeof(opts->src_addr));
158 	}
159 
160 	if (FIELD_OK(src_svcid)) {
161 		memset(opts->src_svcid, 0, sizeof(opts->src_svcid));
162 	}
163 
164 	if (FIELD_OK(command_set)) {
165 		opts->command_set = SPDK_NVME_CC_CSS_NVM;
166 	}
167 
168 	if (FIELD_OK(admin_timeout_ms)) {
169 		opts->admin_timeout_ms = NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000;
170 	}
171 
172 	if (FIELD_OK(header_digest)) {
173 		opts->header_digest = false;
174 	}
175 
176 	if (FIELD_OK(data_digest)) {
177 		opts->data_digest = false;
178 	}
179 
180 	if (FIELD_OK(disable_error_logging)) {
181 		opts->disable_error_logging = false;
182 	}
183 #undef FIELD_OK
184 }
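
/*
 * Illustrative usage sketch (not part of this file's build): a caller fills
 * in the defaults first and then overrides individual fields.  Passing
 * sizeof(opts) as opts_size is what keeps older binaries working when new
 * fields are appended to struct spdk_nvme_ctrlr_opts.  The trid below is
 * assumed to be a transport ID the caller has already populated.
 *
 *	struct spdk_nvme_ctrlr_opts opts;
 *	struct spdk_nvme_ctrlr *ctrlr;
 *
 *	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
 *	opts.num_io_queues = 4;
 *	opts.keep_alive_timeout_ms = 10000;
 *	ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
 */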
185 
186 /**
187  * This function will be called when the process allocates the IO qpair.
188  * Note: the ctrlr_lock must be held when calling this function.
189  */
190 static void
191 nvme_ctrlr_proc_add_io_qpair(struct spdk_nvme_qpair *qpair)
192 {
193 	struct spdk_nvme_ctrlr_process	*active_proc;
194 	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
195 
196 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
197 	if (active_proc) {
198 		TAILQ_INSERT_TAIL(&active_proc->allocated_io_qpairs, qpair, per_process_tailq);
199 		qpair->active_proc = active_proc;
200 	}
201 }
202 
203 /**
204  * This function will be called when the process frees the IO qpair.
205  * Note: the ctrlr_lock must be held when calling this function.
206  */
207 static void
208 nvme_ctrlr_proc_remove_io_qpair(struct spdk_nvme_qpair *qpair)
209 {
210 	struct spdk_nvme_ctrlr_process	*active_proc;
211 	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
212 	struct spdk_nvme_qpair          *active_qpair, *tmp_qpair;
213 
214 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
215 	if (!active_proc) {
216 		return;
217 	}
218 
219 	TAILQ_FOREACH_SAFE(active_qpair, &active_proc->allocated_io_qpairs,
220 			   per_process_tailq, tmp_qpair) {
221 		if (active_qpair == qpair) {
222 			TAILQ_REMOVE(&active_proc->allocated_io_qpairs,
223 				     active_qpair, per_process_tailq);
224 
225 			break;
226 		}
227 	}
228 }
229 
230 void
231 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
232 		struct spdk_nvme_io_qpair_opts *opts,
233 		size_t opts_size)
234 {
235 	assert(ctrlr);
236 
237 	assert(opts);
238 
239 	memset(opts, 0, opts_size);
240 
241 #define FIELD_OK(field) \
242 	offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(opts->field) <= opts_size
243 
244 	if (FIELD_OK(qprio)) {
245 		opts->qprio = SPDK_NVME_QPRIO_URGENT;
246 	}
247 
248 	if (FIELD_OK(io_queue_size)) {
249 		opts->io_queue_size = ctrlr->opts.io_queue_size;
250 	}
251 
252 	if (FIELD_OK(io_queue_requests)) {
253 		opts->io_queue_requests = ctrlr->opts.io_queue_requests;
254 	}
255 
256 	if (FIELD_OK(delay_pcie_doorbell)) {
257 		opts->delay_pcie_doorbell = false;
258 	}
259 
260 	if (FIELD_OK(sq.vaddr)) {
261 		opts->sq.vaddr = NULL;
262 	}
263 
264 	if (FIELD_OK(sq.paddr)) {
265 		opts->sq.paddr = 0;
266 	}
267 
268 	if (FIELD_OK(sq.buffer_size)) {
269 		opts->sq.buffer_size = 0;
270 	}
271 
272 	if (FIELD_OK(cq.vaddr)) {
273 		opts->cq.vaddr = NULL;
274 	}
275 
276 	if (FIELD_OK(cq.paddr)) {
277 		opts->cq.paddr = 0;
278 	}
279 
280 	if (FIELD_OK(cq.buffer_size)) {
281 		opts->cq.buffer_size = 0;
282 	}
283 
284 #undef FIELD_OK
285 }
286 
287 struct spdk_nvme_qpair *
288 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
289 			       const struct spdk_nvme_io_qpair_opts *user_opts,
290 			       size_t opts_size)
291 {
292 	uint32_t				qid;
293 	struct spdk_nvme_qpair			*qpair;
294 	union spdk_nvme_cc_register		cc;
295 	struct spdk_nvme_io_qpair_opts		opts;
296 
297 	if (!ctrlr) {
298 		return NULL;
299 	}
300 
301 	/*
302 	 * Get the default options, then overwrite them with the user-provided options
303 	 * up to opts_size.
304 	 *
305 	 * This allows for extensions of the opts structure without breaking
306 	 * ABI compatibility.
307 	 */
308 	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
309 	if (user_opts) {
310 		memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size));
311 
312 		/* If user passes buffers, make sure they're big enough for the requested queue size */
313 		if (opts.sq.vaddr) {
314 			if (opts.sq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cmd))) {
315 				SPDK_ERRLOG("sq buffer size %lx is too small for sq size %lx\n",
316 					    opts.sq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cmd)));
317 				return NULL;
318 			}
319 		}
320 		if (opts.cq.vaddr) {
321 			if (opts.cq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cpl))) {
322 				SPDK_ERRLOG("cq buffer size %lx is too small for cq size %lx\n",
323 					    opts.cq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cpl)));
324 				return NULL;
325 			}
326 		}
327 	}
328 
329 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
330 	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
331 		SPDK_ERRLOG("get_cc failed\n");
332 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
333 		return NULL;
334 	}
335 
336 	/* Only the low 2 bits (values 0, 1, 2, 3) of QPRIO are valid. */
337 	if ((opts.qprio & 3) != opts.qprio) {
338 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
339 		return NULL;
340 	}
341 
342 	/*
343 	 * Only value SPDK_NVME_QPRIO_URGENT(0) is valid for the
344 	 * default round robin arbitration method.
345 	 */
346 	if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts.qprio != SPDK_NVME_QPRIO_URGENT)) {
347 		SPDK_ERRLOG("invalid queue priority for default round robin arbitration method\n");
348 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
349 		return NULL;
350 	}
351 
352 	/*
353 	 * Get the first available I/O queue ID.
354 	 */
355 	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
356 	if (qid > ctrlr->opts.num_io_queues) {
357 		SPDK_ERRLOG("No free I/O queue IDs\n");
358 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
359 		return NULL;
360 	}
361 
362 	qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, &opts);
363 	if (qpair == NULL) {
364 		SPDK_ERRLOG("nvme_transport_ctrlr_create_io_qpair() failed\n");
365 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
366 		return NULL;
367 	}
368 	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
369 	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
370 	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
371 
372 	nvme_ctrlr_proc_add_io_qpair(qpair);
373 
374 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
375 
376 	if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC) {
377 		spdk_delay_us(100);
378 	}
379 
380 	return qpair;
381 }
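
/*
 * Illustrative sketch (not part of this file's build): allocating an I/O
 * queue pair with a larger queue depth.  Note that, per the checks above,
 * a qprio other than SPDK_NVME_QPRIO_URGENT is only accepted when the
 * controller was enabled with weighted round robin arbitration.
 *
 *	struct spdk_nvme_io_qpair_opts qp_opts;
 *	struct spdk_nvme_qpair *qpair;
 *
 *	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &qp_opts, sizeof(qp_opts));
 *	qp_opts.io_queue_size = 256;
 *	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &qp_opts, sizeof(qp_opts));
 *	if (qpair == NULL) {
 *		... handle allocation failure (no free QID, invalid qprio, etc.) ...
 *	}
 */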
382 
383 int
384 spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
385 {
386 	struct spdk_nvme_ctrlr *ctrlr;
387 	int rc;
388 
389 	assert(qpair != NULL);
390 	assert(nvme_qpair_is_admin_queue(qpair) == false);
391 	assert(qpair->ctrlr != NULL);
392 
393 	ctrlr = qpair->ctrlr;
394 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
395 
396 	if (ctrlr->is_removed) {
397 		rc = -ENODEV;
398 		goto out;
399 	}
400 
401 	if (ctrlr->is_resetting) {
402 		rc = -EAGAIN;
403 		goto out;
404 	}
405 
406 	if (ctrlr->is_failed) {
407 		rc = -ENXIO;
408 		goto out;
409 	}
410 
411 	if (!qpair->transport_qp_is_failed) {
412 		rc = 0;
413 		goto out;
414 	}
415 
416 	/* We have to confirm that any old memory is cleaned up. */
417 	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
418 
419 	rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
420 	if (rc) {
421 		nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
422 		qpair->transport_qp_is_failed = true;
423 		rc = -EAGAIN;
424 		goto out;
425 	}
426 	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
427 	qpair->transport_qp_is_failed = false;
428 
429 out:
430 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
431 	return rc;
432 }
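
/*
 * Illustrative sketch (not part of this file's build): a thread that owns a
 * qpair and sees it fail can retry the reconnect, treating -EAGAIN as
 * transient and the other error codes returned above as permanent.
 *
 *	int rc = spdk_nvme_ctrlr_reconnect_io_qpair(qpair);
 *
 *	if (rc == 0) {
 *		... qpair is connected again, resume submitting I/O ...
 *	} else if (rc == -EAGAIN) {
 *		... controller is resetting or the connect failed; retry later ...
 *	} else {
 *		... -ENODEV/-ENXIO: controller removed or failed; free the qpair ...
 *	}
 */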
433 
434 /*
435  * This internal function will attempt to take the controller
436  * lock before calling disconnect on a controller qpair.
437  * Functions already holding the controller lock should
438  * call nvme_transport_ctrlr_disconnect_qpair directly.
439  */
440 void
441 nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair)
442 {
443 	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
444 
445 	assert(ctrlr != NULL);
446 
447 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
448 	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
449 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
450 }
451 
452 int
453 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
454 {
455 	struct spdk_nvme_ctrlr *ctrlr;
456 
457 	if (qpair == NULL) {
458 		return 0;
459 	}
460 
461 	ctrlr = qpair->ctrlr;
462 
463 	if (qpair->in_completion_context) {
464 		/*
465 		 * There are many cases where it is convenient to delete an io qpair in the context
466 		 *  of that qpair's completion routine.  To handle this properly, set a flag here
467 		 *  so that the completion routine will perform an actual delete after the context
468 		 *  unwinds.
469 		 */
470 		qpair->delete_after_completion_context = 1;
471 		return 0;
472 	}
473 
474 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
475 
476 	nvme_ctrlr_proc_remove_io_qpair(qpair);
477 
478 	TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
479 	spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);
480 
481 	if (nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair)) {
482 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
483 		return -1;
484 	}
485 
486 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
487 	return 0;
488 }
489 
490 static void
491 nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
492 		struct spdk_nvme_intel_log_page_directory *log_page_directory)
493 {
494 	if (log_page_directory == NULL) {
495 		return;
496 	}
497 
498 	if (ctrlr->cdata.vid != SPDK_PCI_VID_INTEL) {
499 		return;
500 	}
501 
502 	ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY] = true;
503 
504 	if (log_page_directory->read_latency_log_len ||
505 	    (ctrlr->quirks & NVME_INTEL_QUIRK_READ_LATENCY)) {
506 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] = true;
507 	}
508 	if (log_page_directory->write_latency_log_len ||
509 	    (ctrlr->quirks & NVME_INTEL_QUIRK_WRITE_LATENCY)) {
510 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] = true;
511 	}
512 	if (log_page_directory->temperature_statistics_log_len) {
513 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] = true;
514 	}
515 	if (log_page_directory->smart_log_len) {
516 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_SMART] = true;
517 	}
518 	if (log_page_directory->marketing_description_log_len) {
519 		ctrlr->log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] = true;
520 	}
521 }
522 
523 static int nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
524 {
525 	int rc = 0;
526 	struct nvme_completion_poll_status	status;
527 	struct spdk_nvme_intel_log_page_directory *log_page_directory;
528 
529 	log_page_directory = spdk_zmalloc(sizeof(struct spdk_nvme_intel_log_page_directory),
530 					  64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
531 	if (log_page_directory == NULL) {
532 		SPDK_ERRLOG("could not allocate log_page_directory\n");
533 		return -ENXIO;
534 	}
535 
536 	rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
537 					      SPDK_NVME_GLOBAL_NS_TAG, log_page_directory,
538 					      sizeof(struct spdk_nvme_intel_log_page_directory),
539 					      0, nvme_completion_poll_cb, &status);
540 	if (rc != 0) {
541 		spdk_free(log_page_directory);
542 		return rc;
543 	}
544 
545 	if (spdk_nvme_wait_for_completion_timeout(ctrlr->adminq, &status,
546 			ctrlr->opts.admin_timeout_ms / 1000)) {
547 		spdk_free(log_page_directory);
548 		SPDK_WARNLOG("Intel log pages not supported on Intel drive!\n");
549 		return 0;
550 	}
551 
552 	nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, log_page_directory);
553 	spdk_free(log_page_directory);
554 	return 0;
555 }
556 
557 static int
558 nvme_ctrlr_set_supported_log_pages(struct spdk_nvme_ctrlr *ctrlr)
559 {
560 	int	rc = 0;
561 
562 	memset(ctrlr->log_page_supported, 0, sizeof(ctrlr->log_page_supported));
563 	/* Mandatory pages */
564 	ctrlr->log_page_supported[SPDK_NVME_LOG_ERROR] = true;
565 	ctrlr->log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] = true;
566 	ctrlr->log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] = true;
567 	if (ctrlr->cdata.lpa.celp) {
568 		ctrlr->log_page_supported[SPDK_NVME_LOG_COMMAND_EFFECTS_LOG] = true;
569 	}
570 	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL && !(ctrlr->quirks & NVME_INTEL_QUIRK_NO_LOG_PAGES)) {
571 		rc = nvme_ctrlr_set_intel_support_log_pages(ctrlr);
572 	}
573 
574 	return rc;
575 }
576 
577 static void
578 nvme_ctrlr_set_intel_supported_features(struct spdk_nvme_ctrlr *ctrlr)
579 {
580 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_MAX_LBA] = true;
581 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_NATIVE_MAX_LBA] = true;
582 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_POWER_GOVERNOR_SETTING] = true;
583 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_SMBUS_ADDRESS] = true;
584 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LED_PATTERN] = true;
585 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_RESET_TIMED_WORKLOAD_COUNTERS] = true;
586 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING] = true;
587 }
588 
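/*
 * Arbitration (Feature Identifier 01h) packs its parameters into cdw11:
 *   bits  2:0   Arbitration Burst (AB)
 *   bits 15:8   Low Priority Weight (LPW)
 *   bits 23:16  Medium Priority Weight (MPW)
 *   bits 31:24  High Priority Weight (HPW)
 * The weights only apply to weighted round robin, so they are filled in
 * below only when the controller reports WRR support.
 */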
589 static void
590 nvme_ctrlr_set_arbitration_feature(struct spdk_nvme_ctrlr *ctrlr)
591 {
592 	uint32_t cdw11;
593 	struct nvme_completion_poll_status status;
594 
595 	if (ctrlr->opts.arbitration_burst == 0) {
596 		return;
597 	}
598 
599 	if (ctrlr->opts.arbitration_burst > 7) {
600 		SPDK_WARNLOG("Valid arbitration burst values are 0-7\n");
601 		return;
602 	}
603 
604 	cdw11 = ctrlr->opts.arbitration_burst;
605 
606 	if (spdk_nvme_ctrlr_get_flags(ctrlr) & SPDK_NVME_CTRLR_WRR_SUPPORTED) {
607 		cdw11 |= (uint32_t)ctrlr->opts.low_priority_weight << 8;
608 		cdw11 |= (uint32_t)ctrlr->opts.medium_priority_weight << 16;
609 		cdw11 |= (uint32_t)ctrlr->opts.high_priority_weight << 24;
610 	}
611 
612 	if (spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ARBITRATION,
613 					    cdw11, 0, NULL, 0,
614 					    nvme_completion_poll_cb, &status) < 0) {
615 		SPDK_ERRLOG("Set arbitration feature failed\n");
616 		return;
617 	}
618 
619 	if (spdk_nvme_wait_for_completion_timeout(ctrlr->adminq, &status,
620 			ctrlr->opts.admin_timeout_ms / 1000)) {
621 		SPDK_ERRLOG("Timeout to set arbitration feature\n");
622 		SPDK_ERRLOG("Timed out setting arbitration feature\n");
623 }
624 
625 static void
626 nvme_ctrlr_set_supported_features(struct spdk_nvme_ctrlr *ctrlr)
627 {
628 	memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported));
629 	/* Mandatory features */
630 	ctrlr->feature_supported[SPDK_NVME_FEAT_ARBITRATION] = true;
631 	ctrlr->feature_supported[SPDK_NVME_FEAT_POWER_MANAGEMENT] = true;
632 	ctrlr->feature_supported[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD] = true;
633 	ctrlr->feature_supported[SPDK_NVME_FEAT_ERROR_RECOVERY] = true;
634 	ctrlr->feature_supported[SPDK_NVME_FEAT_NUMBER_OF_QUEUES] = true;
635 	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_COALESCING] = true;
636 	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION] = true;
637 	ctrlr->feature_supported[SPDK_NVME_FEAT_WRITE_ATOMICITY] = true;
638 	ctrlr->feature_supported[SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION] = true;
639 	/* Optional features */
640 	if (ctrlr->cdata.vwc.present) {
641 		ctrlr->feature_supported[SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE] = true;
642 	}
643 	if (ctrlr->cdata.apsta.supported) {
644 		ctrlr->feature_supported[SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION] = true;
645 	}
646 	if (ctrlr->cdata.hmpre) {
647 		ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_MEM_BUFFER] = true;
648 	}
649 	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) {
650 		nvme_ctrlr_set_intel_supported_features(ctrlr);
651 	}
652 
653 	nvme_ctrlr_set_arbitration_feature(ctrlr);
654 }
655 
656 bool
657 spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
658 {
659 	return ctrlr->is_failed;
660 }
661 
662 void
663 nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
664 {
665 	/*
666 	 * Set the flag here and leave the actual failing of the qpairs to
667 	 * spdk_nvme_qpair_process_completions().
668 	 */
669 	if (hot_remove) {
670 		ctrlr->is_removed = true;
671 	}
672 	ctrlr->is_failed = true;
673 	SPDK_ERRLOG("ctrlr %s in failed state.\n", ctrlr->trid.traddr);
674 }
675 
676 /**
677  * This public API function will try to take the controller lock.
678  * Any private functions being called from a thread already holding
679  * the ctrlr lock should call nvme_ctrlr_fail directly.
680  */
681 void
682 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
683 {
684 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
685 	nvme_ctrlr_fail(ctrlr, false);
686 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
687 }
688 
689 static void
690 nvme_ctrlr_shutdown(struct spdk_nvme_ctrlr *ctrlr)
691 {
692 	union spdk_nvme_cc_register	cc;
693 	union spdk_nvme_csts_register	csts;
694 	uint32_t			ms_waited = 0;
695 	uint32_t			shutdown_timeout_ms;
696 
697 	if (ctrlr->is_removed) {
698 		return;
699 	}
700 
701 	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
702 		SPDK_ERRLOG("get_cc() failed\n");
703 		return;
704 	}
705 
706 	cc.bits.shn = SPDK_NVME_SHN_NORMAL;
707 
708 	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
709 		SPDK_ERRLOG("set_cc() failed\n");
710 		return;
711 	}
712 
713 	/*
714 	 * The NVMe specification defines RTD3E to be the time from
715 	 *  setting CC.SHN = 1 until the controller sets CSTS.SHST = 10b.
716 	 * If the device doesn't report RTD3 entry latency, or if it
717 	 *  reports RTD3 entry latency less than 10 seconds, pick
718 	 *  10 seconds as a reasonable amount of time to
719 	 *  wait before proceeding.
720 	 */
721 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "RTD3E = %" PRIu32 " us\n", ctrlr->cdata.rtd3e);
722 	shutdown_timeout_ms = (ctrlr->cdata.rtd3e + 999) / 1000;
723 	shutdown_timeout_ms = spdk_max(shutdown_timeout_ms, 10000);
724 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "shutdown timeout = %" PRIu32 " ms\n", shutdown_timeout_ms);
725 
726 	do {
727 		if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
728 			SPDK_ERRLOG("get_csts() failed\n");
729 			return;
730 		}
731 
732 		if (csts.bits.shst == SPDK_NVME_SHST_COMPLETE) {
733 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "shutdown complete in %u milliseconds\n",
734 				      ms_waited);
735 			ctrlr->is_shutdown = true;
736 			return;
737 		}
738 
739 		nvme_delay(1000);
740 		ms_waited++;
741 	} while (ms_waited < shutdown_timeout_ms);
742 
743 	SPDK_ERRLOG("did not shutdown within %u milliseconds\n", shutdown_timeout_ms);
744 	if (ctrlr->quirks & NVME_QUIRK_SHST_COMPLETE) {
745 		SPDK_ERRLOG("likely due to shutdown handling in the VMware emulated NVMe SSD\n");
746 	}
747 }
748 
749 static int
750 nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
751 {
752 	union spdk_nvme_cc_register	cc;
753 	int				rc;
754 
755 	rc = nvme_transport_ctrlr_enable(ctrlr);
756 	if (rc != 0) {
757 		SPDK_ERRLOG("transport ctrlr_enable failed\n");
758 		return rc;
759 	}
760 
761 	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
762 		SPDK_ERRLOG("get_cc() failed\n");
763 		return -EIO;
764 	}
765 
766 	if (cc.bits.en != 0) {
767 		SPDK_ERRLOG("called with CC.EN = 1\n");
768 		return -EINVAL;
769 	}
770 
771 	cc.bits.en = 1;
772 	cc.bits.css = 0;
773 	cc.bits.shn = 0;
774 	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
775 	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
776 
777 	/* Page size is 2 ^ (12 + mps). */
778 	cc.bits.mps = spdk_u32log2(ctrlr->page_size) - 12;
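	/* e.g. with the common 4 KiB page size, spdk_u32log2(4096) - 12 == 0, so CC.MPS = 0. */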
779 
780 	if (ctrlr->cap.bits.css == 0) {
781 		SPDK_INFOLOG(SPDK_LOG_NVME,
782 			     "Drive reports no command sets supported. Assuming NVM is supported.\n");
783 		ctrlr->cap.bits.css = SPDK_NVME_CAP_CSS_NVM;
784 	}
785 
786 	if (!(ctrlr->cap.bits.css & (1u << ctrlr->opts.command_set))) {
787 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Requested I/O command set %u but supported mask is 0x%x\n",
788 			      ctrlr->opts.command_set, ctrlr->cap.bits.css);
789 		return -EINVAL;
790 	}
791 
792 	cc.bits.css = ctrlr->opts.command_set;
793 
794 	switch (ctrlr->opts.arb_mechanism) {
795 	case SPDK_NVME_CC_AMS_RR:
796 		break;
797 	case SPDK_NVME_CC_AMS_WRR:
798 		if (SPDK_NVME_CAP_AMS_WRR & ctrlr->cap.bits.ams) {
799 			break;
800 		}
801 		return -EINVAL;
802 	case SPDK_NVME_CC_AMS_VS:
803 		if (SPDK_NVME_CAP_AMS_VS & ctrlr->cap.bits.ams) {
804 			break;
805 		}
806 		return -EINVAL;
807 	default:
808 		return -EINVAL;
809 	}
810 
811 	cc.bits.ams = ctrlr->opts.arb_mechanism;
812 
813 	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
814 		SPDK_ERRLOG("set_cc() failed\n");
815 		return -EIO;
816 	}
817 
818 	return 0;
819 }
820 
821 static int
822 nvme_ctrlr_disable(struct spdk_nvme_ctrlr *ctrlr)
823 {
824 	union spdk_nvme_cc_register	cc;
825 
826 	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
827 		SPDK_ERRLOG("get_cc() failed\n");
828 		return -EIO;
829 	}
830 
831 	if (cc.bits.en == 0) {
832 		return 0;
833 	}
834 
835 	cc.bits.en = 0;
836 
837 	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
838 		SPDK_ERRLOG("set_cc() failed\n");
839 		return -EIO;
840 	}
841 
842 	return 0;
843 }
844 
845 #ifdef DEBUG
846 static const char *
847 nvme_ctrlr_state_string(enum nvme_ctrlr_state state)
848 {
849 	switch (state) {
850 	case NVME_CTRLR_STATE_INIT_DELAY:
851 		return "delay init";
852 	case NVME_CTRLR_STATE_INIT:
853 		return "init";
854 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
855 		return "disable and wait for CSTS.RDY = 1";
856 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
857 		return "disable and wait for CSTS.RDY = 0";
858 	case NVME_CTRLR_STATE_ENABLE:
859 		return "enable controller by writing CC.EN = 1";
860 	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
861 		return "wait for CSTS.RDY = 1";
862 	case NVME_CTRLR_STATE_RESET_ADMIN_QUEUE:
863 		return "reset admin queue";
864 	case NVME_CTRLR_STATE_IDENTIFY:
865 		return "identify controller";
866 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
867 		return "wait for identify controller";
868 	case NVME_CTRLR_STATE_SET_NUM_QUEUES:
869 		return "set number of queues";
870 	case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
871 		return "wait for set number of queues";
872 	case NVME_CTRLR_STATE_GET_NUM_QUEUES:
873 		return "get number of queues";
874 	case NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES:
875 		return "wait for get number of queues";
876 	case NVME_CTRLR_STATE_CONSTRUCT_NS:
877 		return "construct namespaces";
878 	case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
879 		return "identify active ns";
880 	case NVME_CTRLR_STATE_IDENTIFY_NS:
881 		return "identify ns";
882 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
883 		return "wait for identify ns";
884 	case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
885 		return "identify namespace id descriptors";
886 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
887 		return "wait for identify namespace id descriptors";
888 	case NVME_CTRLR_STATE_CONFIGURE_AER:
889 		return "configure AER";
890 	case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
891 		return "wait for configure aer";
892 	case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
893 		return "set supported log pages";
894 	case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
895 		return "set supported features";
896 	case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
897 		return "set doorbell buffer config";
898 	case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
899 		return "wait for doorbell buffer config";
900 	case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
901 		return "set keep alive timeout";
902 	case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
903 		return "wait for set keep alive timeout";
904 	case NVME_CTRLR_STATE_SET_HOST_ID:
905 		return "set host ID";
906 	case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
907 		return "wait for set host ID";
908 	case NVME_CTRLR_STATE_READY:
909 		return "ready";
910 	case NVME_CTRLR_STATE_ERROR:
911 		return "error";
912 	}
913 	return "unknown";
914 }
915 #endif /* DEBUG */
916 
917 static void
918 nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
919 		     uint64_t timeout_in_ms)
920 {
921 	uint64_t ticks_per_ms, timeout_in_ticks, now_ticks;
922 
923 	ctrlr->state = state;
924 	if (timeout_in_ms == NVME_TIMEOUT_INFINITE) {
925 		goto inf;
926 	}
927 
928 	ticks_per_ms = spdk_get_ticks_hz() / 1000;
929 	if (timeout_in_ms > UINT64_MAX / ticks_per_ms) {
930 		SPDK_ERRLOG("Specified timeout would cause integer overflow. Defaulting to no timeout.\n");
931 		goto inf;
932 	}
933 
934 	now_ticks = spdk_get_ticks();
935 	timeout_in_ticks = timeout_in_ms * ticks_per_ms;
936 	if (timeout_in_ticks > UINT64_MAX - now_ticks) {
937 		SPDK_ERRLOG("Specified timeout would cause integer overflow. Defaulting to no timeout.\n");
938 		goto inf;
939 	}
940 
941 	ctrlr->state_timeout_tsc = timeout_in_ticks + now_ticks;
942 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "setting state to %s (timeout %" PRIu64 " ms)\n",
943 		      nvme_ctrlr_state_string(ctrlr->state), ctrlr->state_timeout_tsc);
944 	return;
945 inf:
946 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "setting state to %s (no timeout)\n",
947 		      nvme_ctrlr_state_string(ctrlr->state));
948 	ctrlr->state_timeout_tsc = NVME_TIMEOUT_INFINITE;
949 }
950 
951 static void
952 nvme_ctrlr_free_doorbell_buffer(struct spdk_nvme_ctrlr *ctrlr)
953 {
954 	if (ctrlr->shadow_doorbell) {
955 		spdk_free(ctrlr->shadow_doorbell);
956 		ctrlr->shadow_doorbell = NULL;
957 	}
958 
959 	if (ctrlr->eventidx) {
960 		spdk_free(ctrlr->eventidx);
961 		ctrlr->eventidx = NULL;
962 	}
963 }
964 
965 static void
966 nvme_ctrlr_set_doorbell_buffer_config_done(void *arg, const struct spdk_nvme_cpl *cpl)
967 {
968 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
969 
970 	if (spdk_nvme_cpl_is_error(cpl)) {
971 		SPDK_WARNLOG("Doorbell buffer config failed\n");
972 	} else {
973 		SPDK_INFOLOG(SPDK_LOG_NVME, "NVMe controller: %s doorbell buffer config enabled\n",
974 			     ctrlr->trid.traddr);
975 	}
976 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
977 			     ctrlr->opts.admin_timeout_ms);
978 }
979 
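/*
 * The Doorbell Buffer Config command (NVMe 1.3, intended for emulated
 * controllers) hands the controller two host pages: a shadow doorbell page
 * that the host updates instead of the real doorbell registers, and an
 * EventIdx page the controller uses to indicate when a real doorbell write
 * is still required.  PRP1 and PRP2 carry the physical addresses of those
 * two pages.
 */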
980 static int
981 nvme_ctrlr_set_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr)
982 {
983 	int rc = 0;
984 	uint64_t prp1, prp2, len;
985 
986 	if (!ctrlr->cdata.oacs.doorbell_buffer_config) {
987 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
988 				     ctrlr->opts.admin_timeout_ms);
989 		return 0;
990 	}
991 
992 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
993 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
994 				     ctrlr->opts.admin_timeout_ms);
995 		return 0;
996 	}
997 
998 	/* only 1 page size for doorbell buffer */
999 	ctrlr->shadow_doorbell = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size,
1000 					      NULL, SPDK_ENV_LCORE_ID_ANY,
1001 					      SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
1002 	if (ctrlr->shadow_doorbell == NULL) {
1003 		rc = -ENOMEM;
1004 		goto error;
1005 	}
1006 
1007 	len = ctrlr->page_size;
1008 	prp1 = spdk_vtophys(ctrlr->shadow_doorbell, &len);
1009 	if (prp1 == SPDK_VTOPHYS_ERROR || len != ctrlr->page_size) {
1010 		rc = -EFAULT;
1011 		goto error;
1012 	}
1013 
1014 	ctrlr->eventidx = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size,
1015 				       NULL, SPDK_ENV_LCORE_ID_ANY,
1016 				       SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
1017 	if (ctrlr->eventidx == NULL) {
1018 		rc = -ENOMEM;
1019 		goto error;
1020 	}
1021 
1022 	len = ctrlr->page_size;
1023 	prp2 = spdk_vtophys(ctrlr->eventidx, &len);
1024 	if (prp2 == SPDK_VTOPHYS_ERROR || len != ctrlr->page_size) {
1025 		rc = -EFAULT;
1026 		goto error;
1027 	}
1028 
1029 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
1030 			     ctrlr->opts.admin_timeout_ms);
1031 
1032 	rc = nvme_ctrlr_cmd_doorbell_buffer_config(ctrlr, prp1, prp2,
1033 			nvme_ctrlr_set_doorbell_buffer_config_done, ctrlr);
1034 	if (rc != 0) {
1035 		goto error;
1036 	}
1037 
1038 	return 0;
1039 
1040 error:
1041 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1042 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
1043 	return rc;
1044 }
1045 
1046 int
1047 spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
1048 {
1049 	int rc = 0;
1050 	struct spdk_nvme_qpair	*qpair;
1051 	struct nvme_request	*req, *tmp;
1052 
1053 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1054 
1055 	if (ctrlr->is_resetting || ctrlr->is_removed) {
1056 		/*
1057 		 * Controller is already resetting or has been removed. Return
1058 		 *  immediately since there is no need to kick off another
1059 		 *  reset in these cases.
1060 		 */
1061 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1062 		return ctrlr->is_resetting ? 0 : -ENXIO;
1063 	}
1064 
1065 	ctrlr->is_resetting = true;
1066 	ctrlr->is_failed = false;
1067 
1068 	SPDK_NOTICELOG("resetting controller\n");
1069 
1070 	/* Free all of the queued abort requests */
1071 	STAILQ_FOREACH_SAFE(req, &ctrlr->queued_aborts, stailq, tmp) {
1072 		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
1073 		nvme_free_request(req);
1074 		ctrlr->outstanding_aborts--;
1075 	}
1076 
1077 	nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
1078 
1079 	/* Disable all queues before disabling the controller hardware. */
1080 	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
1081 		nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
1082 		qpair->transport_qp_is_failed = true;
1083 	}
1084 	nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_DISABLED);
1085 	nvme_qpair_complete_error_reqs(ctrlr->adminq);
1086 	nvme_transport_qpair_abort_reqs(ctrlr->adminq, 0 /* retry */);
1087 	nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
1088 	if (nvme_transport_ctrlr_connect_qpair(ctrlr, ctrlr->adminq) != 0) {
1089 		SPDK_ERRLOG("Controller reinitialization failed.\n");
1090 		nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_DISABLED);
1091 		rc = -1;
1092 		goto out;
1093 	}
1094 	nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_CONNECTED);
1095 
1096 	/* Doorbell buffer config is invalid during reset */
1097 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
1098 
1099 	/* Set the state back to INIT to cause a full hardware reset. */
1100 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
1101 
1102 	nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_ENABLED);
1103 	while (ctrlr->state != NVME_CTRLR_STATE_READY) {
1104 		if (nvme_ctrlr_process_init(ctrlr) != 0) {
1105 			SPDK_ERRLOG("controller reinitialization failed\n");
1106 			rc = -1;
1107 			break;
1108 		}
1109 	}
1110 
1111 	/*
1112 	 * For PCIe controllers, the memory locations of the transport qpair
1113 	 * don't change when the controller is reset. They simply need to be
1114 	 * re-enabled with admin commands to the controller. For fabric
1115 	 * controllers we need to disconnect and reconnect the qpair on its
1116 	 * own thread outside of the context of the reset.
1117 	 */
1118 	if (rc == 0 && ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
1119 		/* Reinitialize qpairs */
1120 		TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
1121 			if (nvme_transport_ctrlr_connect_qpair(ctrlr, qpair) != 0) {
1122 				nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
1123 				rc = -1;
1124 				continue;
1125 			}
1126 			nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
1127 			qpair->transport_qp_is_failed = false;
1128 		}
1129 	}
1130 
1131 out:
1132 	if (rc) {
1133 		nvme_ctrlr_fail(ctrlr, false);
1134 	}
1135 	ctrlr->is_resetting = false;
1136 
1137 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1138 
1139 	return rc;
1140 }
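
/*
 * Illustrative sketch (not part of this file's build): a typical error
 * recovery path resets the controller and, for non-PCIe transports, has each
 * owning thread reconnect its own qpair afterwards (see the PCIe vs. fabrics
 * comment above).
 *
 *	if (spdk_nvme_ctrlr_reset(ctrlr) != 0) {
 *		... reset failed and the controller is now in the failed state ...
 *		return;
 *	}
 *	... on fabrics transports, each thread then calls
 *	    spdk_nvme_ctrlr_reconnect_io_qpair() on the qpairs it owns ...
 */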
1141 
1142 int
1143 spdk_nvme_ctrlr_set_trid(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_transport_id *trid)
1144 {
1145 	int rc = 0;
1146 
1147 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1148 
1149 	if (ctrlr->is_failed == false) {
1150 		rc = -EPERM;
1151 		goto out;
1152 	}
1153 
1154 	if (trid->trtype != ctrlr->trid.trtype) {
1155 		rc = -EINVAL;
1156 		goto out;
1157 	}
1158 
1159 	if (strncmp(trid->subnqn, ctrlr->trid.subnqn, SPDK_NVMF_NQN_MAX_LEN)) {
1160 		rc = -EINVAL;
1161 		goto out;
1162 	}
1163 
1164 	ctrlr->trid = *trid;
1165 
1166 out:
1167 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1168 	return rc;
1169 }
1170 
1171 static void
1172 nvme_ctrlr_identify_done(void *arg, const struct spdk_nvme_cpl *cpl)
1173 {
1174 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1175 
1176 	if (spdk_nvme_cpl_is_error(cpl)) {
1177 		SPDK_ERRLOG("nvme_identify_controller failed!\n");
1178 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1179 		return;
1180 	}
1181 
1182 	/*
1183 	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
1184 	 *  controller supports.
1185 	 */
1186 	ctrlr->max_xfer_size = nvme_transport_ctrlr_get_max_xfer_size(ctrlr);
1187 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "transport max_xfer_size %u\n", ctrlr->max_xfer_size);
1188 	if (ctrlr->cdata.mdts > 0) {
1189 		ctrlr->max_xfer_size = spdk_min(ctrlr->max_xfer_size,
1190 						ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
1191 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size);
1192 	}
1193 
1194 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "CNTLID 0x%04" PRIx16 "\n", ctrlr->cdata.cntlid);
1195 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
1196 		ctrlr->cntlid = ctrlr->cdata.cntlid;
1197 	} else {
1198 		/*
1199 		 * Fabrics controllers should already have CNTLID from the Connect command.
1200 		 *
1201 		 * If CNTLID from Connect doesn't match CNTLID in the Identify Controller data,
1202 		 * trust the one from Connect.
1203 		 */
1204 		if (ctrlr->cntlid != ctrlr->cdata.cntlid) {
1205 			SPDK_DEBUGLOG(SPDK_LOG_NVME,
1206 				      "Identify CNTLID 0x%04" PRIx16 " != Connect CNTLID 0x%04" PRIx16 "\n",
1207 				      ctrlr->cdata.cntlid, ctrlr->cntlid);
1208 		}
1209 	}
1210 
1211 	if (ctrlr->cdata.sgls.supported) {
1212 		ctrlr->flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
1213 		/*
1214 		 * Use MSDBD to ensure our max_sges doesn't exceed what the
1215 		 *  controller supports.
1216 		 */
1217 		ctrlr->max_sges = nvme_transport_ctrlr_get_max_sges(ctrlr);
1218 		if (ctrlr->cdata.nvmf_specific.msdbd != 0) {
1219 			ctrlr->max_sges = spdk_min(ctrlr->cdata.nvmf_specific.msdbd, ctrlr->max_sges);
1220 		} else {
1221 			/* A value 0 indicates no limit. */
1222 		}
1223 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "transport max_sges %u\n", ctrlr->max_sges);
1224 	}
1225 
1226 	if (ctrlr->cdata.oacs.security) {
1227 		ctrlr->flags |= SPDK_NVME_CTRLR_SECURITY_SEND_RECV_SUPPORTED;
1228 	}
1229 
1230 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
1231 			     ctrlr->opts.admin_timeout_ms);
1232 }
1233 
1234 static int
1235 nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
1236 {
1237 	int	rc;
1238 
1239 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
1240 			     ctrlr->opts.admin_timeout_ms);
1241 
1242 	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
1243 				     &ctrlr->cdata, sizeof(ctrlr->cdata),
1244 				     nvme_ctrlr_identify_done, ctrlr);
1245 	if (rc != 0) {
1246 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1247 		return rc;
1248 	}
1249 
1250 	return 0;
1251 }
1252 
1253 int
1254 nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
1255 {
1256 	struct nvme_completion_poll_status	status;
1257 	int					rc;
1258 	uint32_t				i;
1259 	uint32_t				num_pages;
1260 	uint32_t				next_nsid = 0;
1261 	uint32_t				*new_ns_list = NULL;
1262 
1263 	if (ctrlr->num_ns == 0) {
1264 		spdk_free(ctrlr->active_ns_list);
1265 		ctrlr->active_ns_list = NULL;
1266 
1267 		return 0;
1268 	}
1269 
1270 	/*
1271 	 * The allocated size must be a multiple of sizeof(struct spdk_nvme_ns_list)
1272 	 */
1273 	num_pages = (ctrlr->num_ns * sizeof(new_ns_list[0]) - 1) / sizeof(struct spdk_nvme_ns_list) + 1;
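	/* e.g. num_ns == 1025 gives (1025 * 4 - 1) / 4096 + 1 == 2 pages of 1024 NSIDs each. */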
1274 	new_ns_list = spdk_zmalloc(num_pages * sizeof(struct spdk_nvme_ns_list), ctrlr->page_size,
1275 				   NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
1276 	if (!new_ns_list) {
1277 		SPDK_ERRLOG("Failed to allocate active_ns_list!\n");
1278 		return -ENOMEM;
1279 	}
1280 
1281 	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 1, 0) && !(ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
1282 		/*
1283 		 * Iterate through the pages and fetch each chunk of 1024 namespaces until
1284 		 * there are no more active namespaces
1285 		 */
1286 		for (i = 0; i < num_pages; i++) {
1287 			rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST, 0, next_nsid,
1288 						     &new_ns_list[1024 * i], sizeof(struct spdk_nvme_ns_list),
1289 						     nvme_completion_poll_cb, &status);
1290 			if (rc != 0) {
1291 				goto fail;
1292 			}
1293 			if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
1294 				SPDK_ERRLOG("nvme_ctrlr_cmd_identify_active_ns_list failed!\n");
1295 				rc = -ENXIO;
1296 				goto fail;
1297 			}
1298 			next_nsid = new_ns_list[1024 * i + 1023];
1299 			if (next_nsid == 0) {
1300 				/*
1301 				 * No more active namespaces found, no need to fetch additional chunks
1302 				 */
1303 				break;
1304 			}
1305 		}
1306 
1307 	} else {
1308 		/*
1309 		 * Controller doesn't support active ns list CNS 0x02 so dummy up
1310 		 * an active ns list
1311 		 */
1312 		for (i = 0; i < ctrlr->num_ns; i++) {
1313 			new_ns_list[i] = i + 1;
1314 		}
1315 	}
1316 
1317 	/*
1318 	 * Now that the list is properly set up, we can swap it into the ctrlr and
1319 	 * free up the previous one.
1320 	 */
1321 	spdk_free(ctrlr->active_ns_list);
1322 	ctrlr->active_ns_list = new_ns_list;
1323 
1324 	return 0;
1325 fail:
1326 	spdk_free(new_ns_list);
1327 	return rc;
1328 }
1329 
1330 static void
1331 nvme_ctrlr_identify_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
1332 {
1333 	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
1334 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1335 	uint32_t nsid;
1336 	int rc;
1337 
1338 	if (spdk_nvme_cpl_is_error(cpl)) {
1339 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1340 		return;
1341 	} else {
1342 		nvme_ns_set_identify_data(ns);
1343 	}
1344 
1345 	/* move on to the next active NS */
1346 	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
1347 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1348 	if (ns == NULL) {
1349 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
1350 				     ctrlr->opts.admin_timeout_ms);
1351 		return;
1352 	}
1353 	ns->ctrlr = ctrlr;
1354 	ns->id = nsid;
1355 
1356 	rc = nvme_ctrlr_identify_ns_async(ns);
1357 	if (rc) {
1358 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1359 	}
1360 }
1361 
1362 static int
1363 nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns)
1364 {
1365 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1366 	struct spdk_nvme_ns_data *nsdata;
1367 
1368 	nsdata = &ctrlr->nsdata[ns->id - 1];
1369 
1370 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
1371 			     ctrlr->opts.admin_timeout_ms);
1372 	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id,
1373 				       nsdata, sizeof(*nsdata),
1374 				       nvme_ctrlr_identify_ns_async_done, ns);
1375 }
1376 
1377 static int
1378 nvme_ctrlr_identify_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1379 {
1380 	uint32_t nsid;
1381 	struct spdk_nvme_ns *ns;
1382 	int rc;
1383 
1384 	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
1385 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1386 	if (ns == NULL) {
1387 		/* No active NS, move on to the next state */
1388 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1389 				     ctrlr->opts.admin_timeout_ms);
1390 		return 0;
1391 	}
1392 
1393 	ns->ctrlr = ctrlr;
1394 	ns->id = nsid;
1395 
1396 	rc = nvme_ctrlr_identify_ns_async(ns);
1397 	if (rc) {
1398 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1399 	}
1400 
1401 	return rc;
1402 }
1403 
1404 static void
1405 nvme_ctrlr_identify_id_desc_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
1406 {
1407 	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
1408 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1409 	uint32_t nsid;
1410 	int rc;
1411 
1412 	if (spdk_nvme_cpl_is_error(cpl)) {
1413 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1414 				     ctrlr->opts.admin_timeout_ms);
1415 		return;
1416 	}
1417 
1418 	/* move on to the next active NS */
1419 	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
1420 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1421 	if (ns == NULL) {
1422 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1423 				     ctrlr->opts.admin_timeout_ms);
1424 		return;
1425 	}
1426 
1427 	rc = nvme_ctrlr_identify_id_desc_async(ns);
1428 	if (rc) {
1429 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1430 	}
1431 }
1432 
1433 static int
1434 nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns)
1435 {
1436 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
1437 
1438 	memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
1439 
1440 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
1441 			     ctrlr->opts.admin_timeout_ms);
1442 	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST,
1443 				       0, ns->id, ns->id_desc_list, sizeof(ns->id_desc_list),
1444 				       nvme_ctrlr_identify_id_desc_async_done, ns);
1445 }
1446 
1447 static int
1448 nvme_ctrlr_identify_id_desc_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1449 {
1450 	uint32_t nsid;
1451 	struct spdk_nvme_ns *ns;
1452 	int rc;
1453 
1454 	if (ctrlr->vs.raw < SPDK_NVME_VERSION(1, 3, 0) ||
1455 	    (ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
1456 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Version < 1.3; not attempting to retrieve NS ID Descriptor List\n");
1457 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1458 				     ctrlr->opts.admin_timeout_ms);
1459 		return 0;
1460 	}
1461 
1462 	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
1463 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
1464 	if (ns == NULL) {
1465 		/* No active NS, move on to the next state */
1466 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
1467 				     ctrlr->opts.admin_timeout_ms);
1468 		return 0;
1469 	}
1470 
1471 	rc = nvme_ctrlr_identify_id_desc_async(ns);
1472 	if (rc) {
1473 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1474 	}
1475 
1476 	return rc;
1477 }
1478 
1479 static void
1480 nvme_ctrlr_set_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
1481 {
1482 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1483 
1484 	if (spdk_nvme_cpl_is_error(cpl)) {
1485 		SPDK_ERRLOG("Set Features - Number of Queues failed!\n");
1486 	}
1487 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_GET_NUM_QUEUES,
1488 			     ctrlr->opts.admin_timeout_ms);
1489 }
1490 
1491 static int
1492 nvme_ctrlr_set_num_queues(struct spdk_nvme_ctrlr *ctrlr)
1493 {
1494 	int rc;
1495 
1496 	if (ctrlr->opts.num_io_queues > SPDK_NVME_MAX_IO_QUEUES) {
1497 		SPDK_NOTICELOG("Limiting requested num_io_queues %u to max %d\n",
1498 			       ctrlr->opts.num_io_queues, SPDK_NVME_MAX_IO_QUEUES);
1499 		ctrlr->opts.num_io_queues = SPDK_NVME_MAX_IO_QUEUES;
1500 	} else if (ctrlr->opts.num_io_queues < 1) {
1501 		SPDK_NOTICELOG("Requested num_io_queues 0, increasing to 1\n");
1502 		ctrlr->opts.num_io_queues = 1;
1503 	}
1504 
1505 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
1506 			     ctrlr->opts.admin_timeout_ms);
1507 
1508 	rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->opts.num_io_queues,
1509 					   nvme_ctrlr_set_num_queues_done, ctrlr);
1510 	if (rc != 0) {
1511 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1512 		return rc;
1513 	}
1514 
1515 	return 0;
1516 }
1517 
1518 static void
1519 nvme_ctrlr_get_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
1520 {
1521 	uint32_t cq_allocated, sq_allocated, min_allocated, i;
1522 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1523 
1524 	if (spdk_nvme_cpl_is_error(cpl)) {
1525 		SPDK_ERRLOG("Get Features - Number of Queues failed!\n");
1526 		ctrlr->opts.num_io_queues = 0;
1527 	} else {
1528 		/*
1529 		 * Data in cdw0 is 0-based.
1530 		 * Lower 16-bits indicate number of submission queues allocated.
1531 		 * Upper 16-bits indicate number of completion queues allocated.
1532 		 */
1533 		sq_allocated = (cpl->cdw0 & 0xFFFF) + 1;
1534 		cq_allocated = (cpl->cdw0 >> 16) + 1;
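		/* e.g. cdw0 == 0x00030007 means 8 submission queues and 4 completion queues were allocated. */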
1535 
1536 		/*
1537 		 * For 1:1 queue mapping, set number of allocated queues to be minimum of
1538 		 * submission and completion queues.
1539 		 */
1540 		min_allocated = spdk_min(sq_allocated, cq_allocated);
1541 
1542 		/* Set number of queues to be minimum of requested and actually allocated. */
1543 		ctrlr->opts.num_io_queues = spdk_min(min_allocated, ctrlr->opts.num_io_queues);
1544 	}
1545 
1546 	ctrlr->free_io_qids = spdk_bit_array_create(ctrlr->opts.num_io_queues + 1);
1547 	if (ctrlr->free_io_qids == NULL) {
1548 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1549 		return;
1550 	}
1551 
1552 	/* Initialize list of free I/O queue IDs. QID 0 is the admin queue. */
1553 	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
1554 	for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
1555 		spdk_bit_array_set(ctrlr->free_io_qids, i);
1556 	}
1557 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONSTRUCT_NS,
1558 			     ctrlr->opts.admin_timeout_ms);
1559 }
1560 
1561 static int
1562 nvme_ctrlr_get_num_queues(struct spdk_nvme_ctrlr *ctrlr)
1563 {
1564 	int rc;
1565 
1566 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES,
1567 			     ctrlr->opts.admin_timeout_ms);
1568 
1569 	/* Obtain the number of queues allocated using Get Features. */
1570 	rc = nvme_ctrlr_cmd_get_num_queues(ctrlr, nvme_ctrlr_get_num_queues_done, ctrlr);
1571 	if (rc != 0) {
1572 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1573 		return rc;
1574 	}
1575 
1576 	return 0;
1577 }
1578 
1579 static void
1580 nvme_ctrlr_set_keep_alive_timeout_done(void *arg, const struct spdk_nvme_cpl *cpl)
1581 {
1582 	uint32_t keep_alive_interval_ms;
1583 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1584 
1585 	if (spdk_nvme_cpl_is_error(cpl)) {
1586 		SPDK_ERRLOG("Keep alive timeout Get Feature failed: SC %x SCT %x\n",
1587 			    cpl->status.sc, cpl->status.sct);
1588 		ctrlr->opts.keep_alive_timeout_ms = 0;
1589 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1590 		return;
1591 	}
1592 
1593 	if (ctrlr->opts.keep_alive_timeout_ms != cpl->cdw0) {
1594 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Controller adjusted keep alive timeout to %u ms\n",
1595 			      cpl->cdw0);
1596 	}
1597 
1598 	ctrlr->opts.keep_alive_timeout_ms = cpl->cdw0;
1599 
1600 	keep_alive_interval_ms = ctrlr->opts.keep_alive_timeout_ms / 2;
1601 	if (keep_alive_interval_ms == 0) {
1602 		keep_alive_interval_ms = 1;
1603 	}
1604 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Sending keep alive every %u ms\n", keep_alive_interval_ms);
1605 
1606 	ctrlr->keep_alive_interval_ticks = (keep_alive_interval_ms * spdk_get_ticks_hz()) / UINT64_C(1000);
1607 
1608 	/* Schedule the first Keep Alive to be sent as soon as possible. */
1609 	ctrlr->next_keep_alive_tick = spdk_get_ticks();
1610 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
1611 			     ctrlr->opts.admin_timeout_ms);
1612 }
1613 
1614 static int
1615 nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
1616 {
1617 	int rc;
1618 
1619 	if (ctrlr->opts.keep_alive_timeout_ms == 0) {
1620 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
1621 				     ctrlr->opts.admin_timeout_ms);
1622 		return 0;
1623 	}
1624 
1625 	if (ctrlr->cdata.kas == 0) {
1626 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Controller KAS is 0 - not enabling Keep Alive\n");
1627 		ctrlr->opts.keep_alive_timeout_ms = 0;
1628 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
1629 				     ctrlr->opts.admin_timeout_ms);
1630 		return 0;
1631 	}
1632 
1633 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
1634 			     ctrlr->opts.admin_timeout_ms);
1635 
1636 	/* Retrieve actual keep alive timeout, since the controller may have adjusted it. */
1637 	rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0,
1638 					     nvme_ctrlr_set_keep_alive_timeout_done, ctrlr);
1639 	if (rc != 0) {
1640 		SPDK_ERRLOG("Keep alive timeout Get Feature failed: %d\n", rc);
1641 		ctrlr->opts.keep_alive_timeout_ms = 0;
1642 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1643 		return rc;
1644 	}
1645 
1646 	return 0;
1647 }
1648 
1649 static void
1650 nvme_ctrlr_set_host_id_done(void *arg, const struct spdk_nvme_cpl *cpl)
1651 {
1652 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1653 
1654 	if (spdk_nvme_cpl_is_error(cpl)) {
1655 		/*
1656 		 * Treat Set Features - Host ID failure as non-fatal, since the Host ID feature
1657 		 * is optional.
1658 		 */
1659 		SPDK_WARNLOG("Set Features - Host ID failed: SC 0x%x SCT 0x%x\n",
1660 			     cpl->status.sc, cpl->status.sct);
1661 	} else {
1662 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Set Features - Host ID was successful\n");
1663 	}
1664 
1665 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
1666 }
1667 
1668 static int
1669 nvme_ctrlr_set_host_id(struct spdk_nvme_ctrlr *ctrlr)
1670 {
1671 	uint8_t *host_id;
1672 	uint32_t host_id_size;
1673 	int rc;
1674 
1675 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
1676 		/*
1677 		 * NVMe-oF sends the host ID during Connect and doesn't allow
1678 		 * Set Features - Host Identifier after Connect, so we don't need to do anything here.
1679 		 */
1680 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "NVMe-oF transport - not sending Set Features - Host ID\n");
1681 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
1682 		return 0;
1683 	}
1684 
1685 	if (ctrlr->cdata.ctratt.host_id_exhid_supported) {
1686 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Using 128-bit extended host identifier\n");
1687 		host_id = ctrlr->opts.extended_host_id;
1688 		host_id_size = sizeof(ctrlr->opts.extended_host_id);
1689 	} else {
1690 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Using 64-bit host identifier\n");
1691 		host_id = ctrlr->opts.host_id;
1692 		host_id_size = sizeof(ctrlr->opts.host_id);
1693 	}
1694 
1695 	/* If the user specified an all-zeroes host identifier, don't send the command. */
1696 	if (spdk_mem_all_zero(host_id, host_id_size)) {
1697 		SPDK_DEBUGLOG(SPDK_LOG_NVME,
1698 			      "User did not specify host ID - not sending Set Features - Host ID\n");
1699 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
1700 		return 0;
1701 	}
1702 
1703 	SPDK_LOGDUMP(SPDK_LOG_NVME, "host_id", host_id, host_id_size);
1704 
1705 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
1706 			     ctrlr->opts.admin_timeout_ms);
1707 
1708 	rc = nvme_ctrlr_cmd_set_host_id(ctrlr, host_id, host_id_size, nvme_ctrlr_set_host_id_done, ctrlr);
1709 	if (rc != 0) {
1710 		SPDK_ERRLOG("Set Features - Host ID failed: %d\n", rc);
1711 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1712 		return rc;
1713 	}
1714 
1715 	return 0;
1716 }
1717 
1718 static void
1719 nvme_ctrlr_destruct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1720 {
1721 	if (ctrlr->ns) {
1722 		uint32_t i, num_ns = ctrlr->num_ns;
1723 
1724 		for (i = 0; i < num_ns; i++) {
1725 			nvme_ns_destruct(&ctrlr->ns[i]);
1726 		}
1727 
1728 		spdk_free(ctrlr->ns);
1729 		ctrlr->ns = NULL;
1730 		ctrlr->num_ns = 0;
1731 	}
1732 
1733 	if (ctrlr->nsdata) {
1734 		spdk_free(ctrlr->nsdata);
1735 		ctrlr->nsdata = NULL;
1736 	}
1737 
1738 	spdk_free(ctrlr->active_ns_list);
1739 	ctrlr->active_ns_list = NULL;
1740 }
1741 
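/*
 * Reconcile the per-namespace structures with the active namespace list:
 * construct namespaces that have become active (ncap still 0) and destruct
 * namespaces that are no longer active (ncap non-zero but not in the list).
 */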
1742 static void
1743 nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1744 {
1745 	uint32_t i, nn = ctrlr->cdata.nn;
1746 	struct spdk_nvme_ns_data *nsdata;
1747 
1748 	for (i = 0; i < nn; i++) {
1749 		struct spdk_nvme_ns	*ns = &ctrlr->ns[i];
1750 		uint32_t		nsid = i + 1;
1751 
1752 		nsdata = &ctrlr->nsdata[nsid - 1];
1753 
1754 		if ((nsdata->ncap == 0) && spdk_nvme_ctrlr_is_active_ns(ctrlr, nsid)) {
1755 			if (nvme_ns_construct(ns, nsid, ctrlr) != 0) {
1756 				continue;
1757 			}
1758 		}
1759 
1760 		if (nsdata->ncap && !spdk_nvme_ctrlr_is_active_ns(ctrlr, nsid)) {
1761 			nvme_ns_destruct(ns);
1762 		}
1763 	}
1764 }
1765 
1766 static int
1767 nvme_ctrlr_construct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1768 {
1769 	int rc = 0;
1770 	uint32_t nn = ctrlr->cdata.nn;
1771 
1772 	/* ctrlr->num_ns may be 0 (startup) or may differ from the new namespace count (reset),
1773 	 * so check whether we need to reallocate.
1774 	 */
1775 	if (nn != ctrlr->num_ns) {
1776 		nvme_ctrlr_destruct_namespaces(ctrlr);
1777 
1778 		if (nn == 0) {
1779 			SPDK_WARNLOG("controller has 0 namespaces\n");
1780 			return 0;
1781 		}
1782 
1783 		ctrlr->ns = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns), 64, NULL,
1784 					 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
1785 		if (ctrlr->ns == NULL) {
1786 			rc = -ENOMEM;
1787 			goto fail;
1788 		}
1789 
1790 		ctrlr->nsdata = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns_data), 64,
1791 					     NULL, SPDK_ENV_SOCKET_ID_ANY,
1792 					     SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
1793 		if (ctrlr->nsdata == NULL) {
1794 			rc = -ENOMEM;
1795 			goto fail;
1796 		}
1797 
1798 		ctrlr->num_ns = nn;
1799 	}
1800 
1801 	return 0;
1802 
1803 fail:
1804 	nvme_ctrlr_destruct_namespaces(ctrlr);
1805 	return rc;
1806 }
1807 
1808 static void
1809 nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl)
1810 {
1811 	struct nvme_async_event_request	*aer = arg;
1812 	struct spdk_nvme_ctrlr		*ctrlr = aer->ctrlr;
1813 	struct spdk_nvme_ctrlr_process	*active_proc;
1814 	union spdk_nvme_async_event_completion	event;
1815 	int					rc;
1816 
1817 	if (cpl->status.sct == SPDK_NVME_SCT_GENERIC &&
1818 	    cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) {
1819 		/*
1820 		 *  This status is simulated when the controller is being shut down, to
1821 		 *  effectively abort outstanding asynchronous event requests
1822 		 *  and make sure all memory is freed.  Do not repost the
1823 		 *  request in this case.
1824 		 */
1825 		return;
1826 	}
1827 
1828 	if (cpl->status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
1829 	    cpl->status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED) {
1830 		/*
1831 		 *  SPDK will only send as many AERs as the device says it supports,
1832 		 *  so this status code indicates an out-of-spec device.  Do not repost
1833 		 *  the request in this case.
1834 		 */
1835 		SPDK_ERRLOG("Controller appears out-of-spec for asynchronous event request handling.\n"
1836 			    "Do not repost this AER.\n");
1837 		return;
1838 	}
1839 
1840 	event.raw = cpl->cdw0;
1841 	if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
1842 	    (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED)) {
1843 		rc = nvme_ctrlr_identify_active_ns(ctrlr);
1844 		if (rc) {
1845 			return;
1846 		}
1847 		nvme_ctrlr_update_namespaces(ctrlr);
1848 	}
1849 
1850 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
1851 	if (active_proc && active_proc->aer_cb_fn) {
1852 		active_proc->aer_cb_fn(active_proc->aer_cb_arg, cpl);
1853 	}
1854 
1855 	/* If the ctrlr has already been shut down, do not send another AER. */
1856 	if (ctrlr->is_shutdown) {
1857 		return;
1858 	}
1859 
1860 	/*
1861 	 * Repost another asynchronous event request to replace the one
1862 	 *  that just completed.
1863 	 */
1864 	if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) {
1865 		/*
1866 		 * We can't do anything to recover from a failure here,
1867 		 * so just log an error message and leave the AER unsubmitted.
1868 		 */
1869 		SPDK_ERRLOG("Resubmitting AER failed!\n");
1870 	}
1871 }
1872 
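/*
 * Allocate a null (no payload) admin request for an Asynchronous Event Request
 * command and submit it on the admin queue; completions are handled by
 * nvme_ctrlr_async_event_cb() above.
 */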
1873 static int
1874 nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
1875 				    struct nvme_async_event_request *aer)
1876 {
1877 	struct nvme_request *req;
1878 
1879 	aer->ctrlr = ctrlr;
1880 	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_async_event_cb, aer);
1881 	aer->req = req;
1882 	if (req == NULL) {
1883 		return -1;
1884 	}
1885 
1886 	req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1887 	return nvme_ctrlr_submit_admin_request(ctrlr, req);
1888 }
1889 
1890 static void
1891 nvme_ctrlr_configure_aer_done(void *arg, const struct spdk_nvme_cpl *cpl)
1892 {
1893 	struct nvme_async_event_request		*aer;
1894 	int					rc;
1895 	uint32_t				i;
1896 	struct spdk_nvme_ctrlr *ctrlr =	(struct spdk_nvme_ctrlr *)arg;
1897 
1898 	if (spdk_nvme_cpl_is_error(cpl)) {
1899 		SPDK_NOTICELOG("nvme_ctrlr_configure_aer failed!\n");
1900 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
1901 				     ctrlr->opts.admin_timeout_ms);
1902 		return;
1903 	}
1904 
1905 	/* aerl is a zero-based value, so we need to add 1 here. */
1906 	ctrlr->num_aers = spdk_min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));
1907 
1908 	for (i = 0; i < ctrlr->num_aers; i++) {
1909 		aer = &ctrlr->aer[i];
1910 		rc = nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
1911 		if (rc) {
1912 			SPDK_ERRLOG("nvme_ctrlr_construct_and_submit_aer failed!\n");
1913 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1914 			return;
1915 		}
1916 	}
1917 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
1918 			     ctrlr->opts.admin_timeout_ms);
1919 }
1920 
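/*
 * Build the Asynchronous Event Configuration feature value: enable all critical
 * warning notices, plus the optional notices (namespace attribute, firmware
 * activation, telemetry log) that the controller's version and identify data
 * indicate are supported, then send Set Features asynchronously.
 */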
1921 static int
1922 nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
1923 {
1924 	union spdk_nvme_feat_async_event_configuration	config;
1925 	int						rc;
1926 
1927 	config.raw = 0;
1928 	config.bits.crit_warn.bits.available_spare = 1;
1929 	config.bits.crit_warn.bits.temperature = 1;
1930 	config.bits.crit_warn.bits.device_reliability = 1;
1931 	config.bits.crit_warn.bits.read_only = 1;
1932 	config.bits.crit_warn.bits.volatile_memory_backup = 1;
1933 
1934 	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 2, 0)) {
1935 		if (ctrlr->cdata.oaes.ns_attribute_notices) {
1936 			config.bits.ns_attr_notice = 1;
1937 		}
1938 		if (ctrlr->cdata.oaes.fw_activation_notices) {
1939 			config.bits.fw_activation_notice = 1;
1940 		}
1941 	}
1942 	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 3, 0) && ctrlr->cdata.lpa.telemetry) {
1943 		config.bits.telemetry_log_notice = 1;
1944 	}
1945 
1946 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
1947 			     ctrlr->opts.admin_timeout_ms);
1948 
1949 	rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, config,
1950 			nvme_ctrlr_configure_aer_done,
1951 			ctrlr);
1952 	if (rc != 0) {
1953 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1954 		return rc;
1955 	}
1956 
1957 	return 0;
1958 }
1959 
1960 struct spdk_nvme_ctrlr_process *
1961 spdk_nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr, pid_t pid)
1962 {
1963 	struct spdk_nvme_ctrlr_process	*active_proc;
1964 
1965 	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
1966 		if (active_proc->pid == pid) {
1967 			return active_proc;
1968 		}
1969 	}
1970 
1971 	return NULL;
1972 }
1973 
1974 struct spdk_nvme_ctrlr_process *
1975 spdk_nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr)
1976 {
1977 	return spdk_nvme_ctrlr_get_process(ctrlr, getpid());
1978 }
1979 
1980 /**
1981  * This function will be called when a process is using the controller.
1982  *  1. For the primary process, it is called when constructing the controller.
1983  *  2. For the secondary process, it is called when probing the controller.
1984  * Note: it checks whether the process has already been added and does nothing if so.
1985  */
1986 int
1987 nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
1988 {
1989 	struct spdk_nvme_ctrlr_process	*ctrlr_proc;
1990 	pid_t				pid = getpid();
1991 
1992 	/* Check whether the process is already added or not */
1993 	if (spdk_nvme_ctrlr_get_process(ctrlr, pid)) {
1994 		return 0;
1995 	}
1996 
1997 	/* Initialize the per process properties for this ctrlr */
1998 	ctrlr_proc = spdk_zmalloc(sizeof(struct spdk_nvme_ctrlr_process),
1999 				  64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
2000 	if (ctrlr_proc == NULL) {
2001 		SPDK_ERRLOG("failed to allocate memory to track the process props\n");
2002 
2003 		return -1;
2004 	}
2005 
2006 	ctrlr_proc->is_primary = spdk_process_is_primary();
2007 	ctrlr_proc->pid = pid;
2008 	STAILQ_INIT(&ctrlr_proc->active_reqs);
2009 	ctrlr_proc->devhandle = devhandle;
2010 	ctrlr_proc->ref = 0;
2011 	TAILQ_INIT(&ctrlr_proc->allocated_io_qpairs);
2012 
2013 	TAILQ_INSERT_TAIL(&ctrlr->active_procs, ctrlr_proc, tailq);
2014 
2015 	return 0;
2016 }
2017 
2018 /**
2019  * This function will be called when the process detaches the controller.
2020  * Note: the ctrlr_lock must be held when calling this function.
2021  */
2022 static void
2023 nvme_ctrlr_remove_process(struct spdk_nvme_ctrlr *ctrlr,
2024 			  struct spdk_nvme_ctrlr_process *proc)
2025 {
2026 	struct spdk_nvme_qpair	*qpair, *tmp_qpair;
2027 
2028 	assert(STAILQ_EMPTY(&proc->active_reqs));
2029 
2030 	TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
2031 		spdk_nvme_ctrlr_free_io_qpair(qpair);
2032 	}
2033 
2034 	TAILQ_REMOVE(&ctrlr->active_procs, proc, tailq);
2035 
2036 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
2037 		spdk_pci_device_detach(proc->devhandle);
2038 	}
2039 
2040 	spdk_free(proc);
2041 }
2042 
2043 /**
2044  * This function will be called when a process has exited unexpectedly,
2045  *  in order to free any incomplete nvme requests, allocated IO qpairs
2046  *  and allocated memory.
2047  * Note: the ctrlr_lock must be held when calling this function.
2048  */
2049 static void
2050 nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc)
2051 {
2052 	struct nvme_request	*req, *tmp_req;
2053 	struct spdk_nvme_qpair	*qpair, *tmp_qpair;
2054 
2055 	STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) {
2056 		STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq);
2057 
2058 		assert(req->pid == proc->pid);
2059 
2060 		nvme_free_request(req);
2061 	}
2062 
2063 	TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
2064 		TAILQ_REMOVE(&proc->allocated_io_qpairs, qpair, per_process_tailq);
2065 
2066 		/*
2067 		 * The process may have been killed while some qpairs were in their
2068 		 *  completion context.  Clear that flag here to allow these IO
2069 		 *  qpairs to be deleted.
2070 		 */
2071 		qpair->in_completion_context = 0;
2072 
2073 		qpair->no_deletion_notification_needed = 1;
2074 
2075 		spdk_nvme_ctrlr_free_io_qpair(qpair);
2076 	}
2077 
2078 	spdk_free(proc);
2079 }
2080 
2081 /**
2082  * This function will be called when destructing the controller.
2083  *  1. There are no more admin requests outstanding on this controller.
2084  *  2. Clean up any resources left allocated by processes that have gone away.
2085  */
2086 void
2087 nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
2088 {
2089 	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;
2090 
2091 	/* Free all the processes' properties and make sure there are no pending admin I/Os */
2092 	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
2093 		TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
2094 
2095 		assert(STAILQ_EMPTY(&active_proc->active_reqs));
2096 
2097 		spdk_free(active_proc);
2098 	}
2099 }
2100 
2101 /**
2102  * This function will be called when any other process attaches or
2103  *  detaches the controller, in order to clean up any unexpectedly
2104  *  terminated processes.
2105  * Note: the ctrlr_lock must be held when calling this function.
2106  */
2107 static int
2108 nvme_ctrlr_remove_inactive_proc(struct spdk_nvme_ctrlr *ctrlr)
2109 {
2110 	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;
2111 	int				active_proc_count = 0;
2112 
2113 	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
2114 		if ((kill(active_proc->pid, 0) == -1) && (errno == ESRCH)) {
2115 			SPDK_ERRLOG("process %d terminated unexpectedly\n", active_proc->pid);
2116 
2117 			TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
2118 
2119 			nvme_ctrlr_cleanup_process(active_proc);
2120 		} else {
2121 			active_proc_count++;
2122 		}
2123 	}
2124 
2125 	return active_proc_count;
2126 }
2127 
2128 void
2129 nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
2130 {
2131 	struct spdk_nvme_ctrlr_process	*active_proc;
2132 
2133 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2134 
2135 	nvme_ctrlr_remove_inactive_proc(ctrlr);
2136 
2137 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
2138 	if (active_proc) {
2139 		active_proc->ref++;
2140 	}
2141 
2142 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2143 }
2144 
2145 void
2146 nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
2147 {
2148 	struct spdk_nvme_ctrlr_process	*active_proc;
2149 	int				proc_count;
2150 
2151 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2152 
2153 	proc_count = nvme_ctrlr_remove_inactive_proc(ctrlr);
2154 
2155 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
2156 	if (active_proc) {
2157 		active_proc->ref--;
2158 		assert(active_proc->ref >= 0);
2159 
2160 		/*
2161 		 * The last active process will be removed at the end of
2162 		 * the destruction of the controller.
2163 		 */
2164 		if (active_proc->ref == 0 && proc_count != 1) {
2165 			nvme_ctrlr_remove_process(ctrlr, active_proc);
2166 		}
2167 	}
2168 
2169 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2170 }
2171 
2172 int
2173 nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
2174 {
2175 	struct spdk_nvme_ctrlr_process	*active_proc;
2176 	int				ref = 0;
2177 
2178 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2179 
2180 	nvme_ctrlr_remove_inactive_proc(ctrlr);
2181 
2182 	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
2183 		ref += active_proc->ref;
2184 	}
2185 
2186 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2187 
2188 	return ref;
2189 }
2190 
2191 /**
2192  *  Get the PCI device handle which is only visible to its associated process.
2193  */
2194 struct spdk_pci_device *
2195 nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr)
2196 {
2197 	struct spdk_nvme_ctrlr_process	*active_proc;
2198 	struct spdk_pci_device		*devhandle = NULL;
2199 
2200 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2201 
2202 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
2203 	if (active_proc) {
2204 		devhandle = active_proc->devhandle;
2205 	}
2206 
2207 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2208 
2209 	return devhandle;
2210 }
2211 
2212 /**
2213  * This function will be called repeatedly during initialization until the controller is ready.
2214  */
2215 int
2216 nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
2217 {
2218 	union spdk_nvme_cc_register cc;
2219 	union spdk_nvme_csts_register csts;
2220 	uint32_t ready_timeout_in_ms;
2221 	int rc = 0;
2222 
2223 	/*
2224 	 * We may need to avoid accessing any register on the target controller
2225 	 * for a while. If so, return early without advancing the state machine.
2226 	 * The sleep_timeout_tsc > 0 check is also needed for the unit tests.
2227 	 */
2228 	if ((ctrlr->sleep_timeout_tsc > 0) &&
2229 	    (spdk_get_ticks() <= ctrlr->sleep_timeout_tsc)) {
2230 		return 0;
2231 	}
2232 	ctrlr->sleep_timeout_tsc = 0;
2233 
2234 	if (nvme_ctrlr_get_cc(ctrlr, &cc) ||
2235 	    nvme_ctrlr_get_csts(ctrlr, &csts)) {
2236 		if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
2237 			/* While a device is resetting, it may be unable to service MMIO reads
2238 			 * temporarily. Allow for this case.
2239 			 */
2240 			SPDK_ERRLOG("Get registers failed while waiting for CSTS.RDY == 0\n");
2241 			goto init_timeout;
2242 		}
2243 		SPDK_ERRLOG("Failed to read CC and CSTS in state %d\n", ctrlr->state);
2244 		return -EIO;
2245 	}
2246 
2247 	ready_timeout_in_ms = 500 * ctrlr->cap.bits.to;
2248 
2249 	/*
2250 	 * Check if the current initialization step is done or has timed out.
2251 	 */
2252 	switch (ctrlr->state) {
2253 	case NVME_CTRLR_STATE_INIT_DELAY:
2254 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, ready_timeout_in_ms);
2255 		if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_INIT) {
2256 			/*
2257 			 * Controller may need some delay before it's enabled.
2258 			 *
2259 			 * This is a workaround for an issue where the PCIe-attached NVMe controller
2260 			 * is not ready after VFIO reset. We delay the initialization rather than the
2261 			 * enabling itself, because this is required only for the very first enabling
2262 			 * - directly after a VFIO reset.
2263 			 */
2264 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "Adding 2 second delay before initializing the controller\n");
2265 			ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2000 * spdk_get_ticks_hz() / 1000);
2266 		}
2267 		break;
2268 
2269 	case NVME_CTRLR_STATE_INIT:
2270 		/* Begin the hardware initialization by making sure the controller is disabled. */
2271 		if (cc.bits.en) {
2272 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1\n");
2273 			/*
2274 			 * Controller is currently enabled. We need to disable it to cause a reset.
2275 			 *
2276 			 * If CC.EN = 1 && CSTS.RDY = 0, the controller is in the process of becoming ready.
2277 			 *  Wait for the ready bit to be 1 before disabling the controller.
2278 			 */
2279 			if (csts.bits.rdy == 0) {
2280 				SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 0 - waiting for reset to complete\n");
2281 				nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1, ready_timeout_in_ms);
2282 				return 0;
2283 			}
2284 
2285 			/* CC.EN = 1 && CSTS.RDY == 1, so we can immediately disable the controller. */
2286 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 0\n");
2287 			cc.bits.en = 0;
2288 			if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
2289 				SPDK_ERRLOG("set_cc() failed\n");
2290 				return -EIO;
2291 			}
2292 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
2293 
2294 			/*
2295 			 * Wait 2.5 seconds before accessing PCI registers.
2296 			 * Not using sleep() to avoid blocking other controllers' initialization.
2297 			 */
2298 			if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) {
2299 				SPDK_DEBUGLOG(SPDK_LOG_NVME, "Applying quirk: delay 2.5 seconds before reading registers\n");
2300 				ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2500 * spdk_get_ticks_hz() / 1000);
2301 			}
2302 			return 0;
2303 		} else {
2304 			if (csts.bits.rdy == 1) {
2305 				SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 0 && CSTS.RDY = 1 - waiting for shutdown to complete\n");
2306 			}
2307 
2308 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
2309 			return 0;
2310 		}
2311 		break;
2312 
2313 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
2314 		if (csts.bits.rdy == 1) {
2315 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 1 - disabling controller\n");
2316 			/* CC.EN = 1 && CSTS.RDY = 1, so we can set CC.EN = 0 now. */
2317 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 0\n");
2318 			cc.bits.en = 0;
2319 			if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
2320 				SPDK_ERRLOG("set_cc() failed\n");
2321 				return -EIO;
2322 			}
2323 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
2324 			return 0;
2325 		}
2326 		break;
2327 
2328 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
2329 		if (csts.bits.rdy == 0) {
2330 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 0 && CSTS.RDY = 0\n");
2331 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE, ready_timeout_in_ms);
2332 			/*
2333 			 * Delay 100us before setting CC.EN = 1.  Some NVMe SSDs miss CC.EN being
2334 			 *  set to 1 if it happens too soon after CSTS.RDY is reported as 0.
2335 			 */
2336 			spdk_delay_us(100);
2337 			return 0;
2338 		}
2339 		break;
2340 
2341 	case NVME_CTRLR_STATE_ENABLE:
2342 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 1\n");
2343 		rc = nvme_ctrlr_enable(ctrlr);
2344 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1, ready_timeout_in_ms);
2345 		return rc;
2346 
2347 	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
2348 		if (csts.bits.rdy == 1) {
2349 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 1 - controller is ready\n");
2350 			/*
2351 			 * The controller has been enabled.
2352 			 *  Perform the rest of initialization serially.
2353 			 */
2354 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_RESET_ADMIN_QUEUE,
2355 					     ctrlr->opts.admin_timeout_ms);
2356 			return 0;
2357 		}
2358 		break;
2359 
2360 	case NVME_CTRLR_STATE_RESET_ADMIN_QUEUE:
2361 		nvme_transport_qpair_reset(ctrlr->adminq);
2362 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY,
2363 				     ctrlr->opts.admin_timeout_ms);
2364 		break;
2365 
2366 	case NVME_CTRLR_STATE_IDENTIFY:
2367 		rc = nvme_ctrlr_identify(ctrlr);
2368 		break;
2369 
2370 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
2371 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2372 		break;
2373 
2374 	case NVME_CTRLR_STATE_SET_NUM_QUEUES:
2375 		rc = nvme_ctrlr_set_num_queues(ctrlr);
2376 		break;
2377 
2378 	case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
2379 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2380 		break;
2381 
2382 	case NVME_CTRLR_STATE_GET_NUM_QUEUES:
2383 		rc = nvme_ctrlr_get_num_queues(ctrlr);
2384 		break;
2385 
2386 	case NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES:
2387 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2388 		break;
2389 
2390 	case NVME_CTRLR_STATE_CONSTRUCT_NS:
2391 		rc = nvme_ctrlr_construct_namespaces(ctrlr);
2392 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
2393 				     ctrlr->opts.admin_timeout_ms);
2394 		break;
2395 
2396 	case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
2397 		rc = nvme_ctrlr_identify_active_ns(ctrlr);
2398 		if (rc < 0) {
2399 			nvme_ctrlr_destruct_namespaces(ctrlr);
2400 		}
2401 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS,
2402 				     ctrlr->opts.admin_timeout_ms);
2403 		break;
2404 
2405 	case NVME_CTRLR_STATE_IDENTIFY_NS:
2406 		rc = nvme_ctrlr_identify_namespaces(ctrlr);
2407 		break;
2408 
2409 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
2410 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2411 		break;
2412 
2413 	case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
2414 		rc = nvme_ctrlr_identify_id_desc_namespaces(ctrlr);
2415 		break;
2416 
2417 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
2418 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2419 		break;
2420 
2421 	case NVME_CTRLR_STATE_CONFIGURE_AER:
2422 		rc = nvme_ctrlr_configure_aer(ctrlr);
2423 		break;
2424 
2425 	case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
2426 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2427 		break;
2428 
2429 	case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
2430 		rc = nvme_ctrlr_set_supported_log_pages(ctrlr);
2431 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
2432 				     ctrlr->opts.admin_timeout_ms);
2433 		break;
2434 
2435 	case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
2436 		nvme_ctrlr_set_supported_features(ctrlr);
2437 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
2438 				     ctrlr->opts.admin_timeout_ms);
2439 		break;
2440 
2441 	case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
2442 		rc = nvme_ctrlr_set_doorbell_buffer_config(ctrlr);
2443 		break;
2444 
2445 	case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
2446 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2447 		break;
2448 
2449 	case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
2450 		rc = nvme_ctrlr_set_keep_alive_timeout(ctrlr);
2451 		break;
2452 
2453 	case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
2454 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2455 		break;
2456 
2457 	case NVME_CTRLR_STATE_SET_HOST_ID:
2458 		rc = nvme_ctrlr_set_host_id(ctrlr);
2459 		break;
2460 
2461 	case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
2462 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2463 		break;
2464 
2465 	case NVME_CTRLR_STATE_READY:
2466 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Ctrlr already in ready state\n");
2467 		return 0;
2468 
2469 	case NVME_CTRLR_STATE_ERROR:
2470 		SPDK_ERRLOG("Ctrlr %s is in error state\n", ctrlr->trid.traddr);
2471 		return -1;
2472 
2473 	default:
2474 		assert(0);
2475 		return -1;
2476 	}
2477 
2478 init_timeout:
2479 	if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE &&
2480 	    spdk_get_ticks() > ctrlr->state_timeout_tsc) {
2481 		SPDK_ERRLOG("Initialization timed out in state %d\n", ctrlr->state);
2482 		return -1;
2483 	}
2484 
2485 	return rc;
2486 }
2487 
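/*
 * Initialize a recursive mutex that is also robust and process-shared on
 * platforms that support those attributes (they are skipped on FreeBSD).
 */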
2488 int
2489 nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx)
2490 {
2491 	pthread_mutexattr_t attr;
2492 	int rc = 0;
2493 
2494 	if (pthread_mutexattr_init(&attr)) {
2495 		return -1;
2496 	}
2497 	if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) ||
2498 #ifndef __FreeBSD__
2499 	    pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST) ||
2500 	    pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
2501 #endif
2502 	    pthread_mutex_init(mtx, &attr)) {
2503 		rc = -1;
2504 	}
2505 	pthread_mutexattr_destroy(&attr);
2506 	return rc;
2507 }
2508 
2509 int
2510 nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
2511 {
2512 	int rc;
2513 
2514 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
2515 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT_DELAY, NVME_TIMEOUT_INFINITE);
2516 	} else {
2517 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
2518 	}
2519 
2520 	ctrlr->flags = 0;
2521 	ctrlr->free_io_qids = NULL;
2522 	ctrlr->is_resetting = false;
2523 	ctrlr->is_failed = false;
2524 	ctrlr->is_shutdown = false;
2525 
2526 	TAILQ_INIT(&ctrlr->active_io_qpairs);
2527 	STAILQ_INIT(&ctrlr->queued_aborts);
2528 	ctrlr->outstanding_aborts = 0;
2529 
2530 	rc = nvme_robust_mutex_init_recursive_shared(&ctrlr->ctrlr_lock);
2531 	if (rc != 0) {
2532 		return rc;
2533 	}
2534 
2535 	TAILQ_INIT(&ctrlr->active_procs);
2536 
2537 	return rc;
2538 }
2539 
2540 /* This function should be called once at ctrlr initialization to set up constant properties. */
2541 void
2542 nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_register *cap,
2543 		    const union spdk_nvme_vs_register *vs)
2544 {
2545 	ctrlr->cap = *cap;
2546 	ctrlr->vs = *vs;
2547 
2548 	if (ctrlr->cap.bits.ams & SPDK_NVME_CAP_AMS_WRR) {
2549 		ctrlr->flags |= SPDK_NVME_CTRLR_WRR_SUPPORTED;
2550 	}
2551 
2552 	ctrlr->min_page_size = 1u << (12 + ctrlr->cap.bits.mpsmin);
2553 
2554 	/* For now, always select page_size == min_page_size. */
2555 	ctrlr->page_size = ctrlr->min_page_size;
2556 
2557 	ctrlr->opts.io_queue_size = spdk_max(ctrlr->opts.io_queue_size, SPDK_NVME_IO_QUEUE_MIN_ENTRIES);
2558 	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, MAX_IO_QUEUE_ENTRIES);
2559 	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
2560 
2561 	ctrlr->opts.io_queue_requests = spdk_max(ctrlr->opts.io_queue_requests, ctrlr->opts.io_queue_size);
2562 }
2563 
2564 void
2565 nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr)
2566 {
2567 	pthread_mutex_destroy(&ctrlr->ctrlr_lock);
2568 }
2569 
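/*
 * Tear down the controller: abort outstanding AERs, free all active I/O qpairs,
 * shut down (or just disable, if no_shn_notification is set) the device, and
 * release namespace and transport resources.
 */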
2570 void
2571 nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
2572 {
2573 	struct spdk_nvme_qpair *qpair, *tmp;
2574 
2575 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Prepare to destruct SSD: %s\n", ctrlr->trid.traddr);
2576 
2577 	nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
2578 
2579 	TAILQ_FOREACH_SAFE(qpair, &ctrlr->active_io_qpairs, tailq, tmp) {
2580 		spdk_nvme_ctrlr_free_io_qpair(qpair);
2581 	}
2582 
2583 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
2584 
2585 	if (ctrlr->opts.no_shn_notification) {
2586 		SPDK_INFOLOG(SPDK_LOG_NVME, "Disable SSD: %s without shutdown notification\n",
2587 			     ctrlr->trid.traddr);
2588 		nvme_ctrlr_disable(ctrlr);
2589 	} else {
2590 		nvme_ctrlr_shutdown(ctrlr);
2591 	}
2592 
2593 	nvme_ctrlr_destruct_namespaces(ctrlr);
2594 
2595 	spdk_bit_array_free(&ctrlr->free_io_qids);
2596 
2597 	nvme_transport_ctrlr_destruct(ctrlr);
2598 }
2599 
2600 int
2601 nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
2602 				struct nvme_request *req)
2603 {
2604 	return nvme_qpair_submit_request(ctrlr->adminq, req);
2605 }
2606 
2607 static void
2608 nvme_keep_alive_completion(void *cb_ctx, const struct spdk_nvme_cpl *cpl)
2609 {
2610 	/* Do nothing */
2611 }
2612 
2613 /*
2614  * Check if we need to send a Keep Alive command.
2615  * Caller must hold ctrlr->ctrlr_lock.
2616  */
2617 static void
2618 nvme_ctrlr_keep_alive(struct spdk_nvme_ctrlr *ctrlr)
2619 {
2620 	uint64_t now;
2621 	struct nvme_request *req;
2622 	struct spdk_nvme_cmd *cmd;
2623 	int rc;
2624 
2625 	now = spdk_get_ticks();
2626 	if (now < ctrlr->next_keep_alive_tick) {
2627 		return;
2628 	}
2629 
2630 	req = nvme_allocate_request_null(ctrlr->adminq, nvme_keep_alive_completion, NULL);
2631 	if (req == NULL) {
2632 		return;
2633 	}
2634 
2635 	cmd = &req->cmd;
2636 	cmd->opc = SPDK_NVME_OPC_KEEP_ALIVE;
2637 
2638 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
2639 	if (rc != 0) {
2640 		SPDK_ERRLOG("Submitting Keep Alive failed\n");
2641 	}
2642 
2643 	ctrlr->next_keep_alive_tick = now + ctrlr->keep_alive_interval_ticks;
2644 }
2645 
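/*
 * Poll the admin queue for completions.  This also sends a Keep Alive command
 * when one is due and processes any queued I/O messages from other threads.
 */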
2646 int32_t
2647 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
2648 {
2649 	int32_t num_completions;
2650 	int32_t rc;
2651 
2652 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2653 
2654 	if (ctrlr->keep_alive_interval_ticks) {
2655 		nvme_ctrlr_keep_alive(ctrlr);
2656 	}
2657 
2658 	rc = spdk_nvme_io_msg_process(ctrlr);
2659 	if (rc < 0) {
2660 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2661 		return rc;
2662 	}
2663 	num_completions = rc;
2664 
2665 	rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2666 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2667 
2668 	if (rc < 0) {
2669 		num_completions = rc;
2670 	} else {
2671 		num_completions += rc;
2672 	}
2673 
2674 	return num_completions;
2675 }
2676 
2677 const struct spdk_nvme_ctrlr_data *
2678 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
2679 {
2680 	return &ctrlr->cdata;
2681 }
2682 
2683 union spdk_nvme_csts_register spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
2684 {
2685 	union spdk_nvme_csts_register csts;
2686 
2687 	if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
2688 		csts.raw = 0xFFFFFFFFu;
2689 	}
2690 	return csts;
2691 }
2692 
2693 union spdk_nvme_cap_register spdk_nvme_ctrlr_get_regs_cap(struct spdk_nvme_ctrlr *ctrlr)
2694 {
2695 	return ctrlr->cap;
2696 }
2697 
2698 union spdk_nvme_vs_register spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
2699 {
2700 	return ctrlr->vs;
2701 }
2702 
2703 union spdk_nvme_cmbsz_register spdk_nvme_ctrlr_get_regs_cmbsz(struct spdk_nvme_ctrlr *ctrlr)
2704 {
2705 	union spdk_nvme_cmbsz_register cmbsz;
2706 
2707 	if (nvme_ctrlr_get_cmbsz(ctrlr, &cmbsz)) {
2708 		cmbsz.raw = 0;
2709 	}
2710 
2711 	return cmbsz;
2712 }
2713 
2714 uint32_t
2715 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
2716 {
2717 	return ctrlr->num_ns;
2718 }
2719 
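/*
 * Binary search the active namespace list (reported by the controller in
 * increasing NSID order) for the given nsid.  Returns the index into
 * active_ns_list, or -1 if the namespace is not active.
 */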
2720 static int32_t
2721 spdk_nvme_ctrlr_active_ns_idx(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2722 {
2723 	int32_t result = -1;
2724 
2725 	if (ctrlr->active_ns_list == NULL || nsid == 0 || nsid > ctrlr->num_ns) {
2726 		return result;
2727 	}
2728 
2729 	int32_t lower = 0;
2730 	int32_t upper = ctrlr->num_ns - 1;
2731 	int32_t mid;
2732 
2733 	while (lower <= upper) {
2734 		mid = lower + (upper - lower) / 2;
2735 		if (ctrlr->active_ns_list[mid] == nsid) {
2736 			result = mid;
2737 			break;
2738 		} else {
2739 			if (ctrlr->active_ns_list[mid] != 0 && ctrlr->active_ns_list[mid] < nsid) {
2740 				lower = mid + 1;
2741 			} else {
2742 				upper = mid - 1;
2743 			}
2744 
2745 		}
2746 	}
2747 
2748 	return result;
2749 }
2750 
2751 bool
2752 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2753 {
2754 	return spdk_nvme_ctrlr_active_ns_idx(ctrlr, nsid) != -1;
2755 }
2756 
2757 uint32_t
2758 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
2759 {
2760 	return ctrlr->active_ns_list ? ctrlr->active_ns_list[0] : 0;
2761 }
2762 
2763 uint32_t
2764 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
2765 {
2766 	int32_t nsid_idx = spdk_nvme_ctrlr_active_ns_idx(ctrlr, prev_nsid);
2767 	if (ctrlr->active_ns_list && nsid_idx >= 0 && (uint32_t)nsid_idx < ctrlr->num_ns - 1) {
2768 		return ctrlr->active_ns_list[nsid_idx + 1];
2769 	}
2770 	return 0;
2771 }
2772 
2773 struct spdk_nvme_ns *
2774 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2775 {
2776 	if (nsid < 1 || nsid > ctrlr->num_ns) {
2777 		return NULL;
2778 	}
2779 
2780 	return &ctrlr->ns[nsid - 1];
2781 }
2782 
2783 struct spdk_pci_device *
2784 spdk_nvme_ctrlr_get_pci_device(struct spdk_nvme_ctrlr *ctrlr)
2785 {
2786 	if (ctrlr == NULL) {
2787 		return NULL;
2788 	}
2789 
2790 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
2791 		return NULL;
2792 	}
2793 
2794 	return nvme_ctrlr_proc_get_devhandle(ctrlr);
2795 }
2796 
2797 uint32_t
2798 spdk_nvme_ctrlr_get_max_xfer_size(const struct spdk_nvme_ctrlr *ctrlr)
2799 {
2800 	return ctrlr->max_xfer_size;
2801 }
2802 
2803 void
2804 spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
2805 				      spdk_nvme_aer_cb aer_cb_fn,
2806 				      void *aer_cb_arg)
2807 {
2808 	struct spdk_nvme_ctrlr_process *active_proc;
2809 
2810 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2811 
2812 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
2813 	if (active_proc) {
2814 		active_proc->aer_cb_fn = aer_cb_fn;
2815 		active_proc->aer_cb_arg = aer_cb_arg;
2816 	}
2817 
2818 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2819 }
2820 
2821 void
2822 spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,
2823 		uint64_t timeout_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)
2824 {
2825 	struct spdk_nvme_ctrlr_process	*active_proc;
2826 
2827 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2828 
2829 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
2830 	if (active_proc) {
2831 		active_proc->timeout_ticks = timeout_us * spdk_get_ticks_hz() / 1000000ULL;
2832 		active_proc->timeout_cb_fn = cb_fn;
2833 		active_proc->timeout_cb_arg = cb_arg;
2834 	}
2835 
2836 	ctrlr->timeout_enabled = true;
2837 
2838 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2839 }
2840 
2841 bool
2842 spdk_nvme_ctrlr_is_log_page_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page)
2843 {
2844 	/* No bounds check necessary, since log_page is uint8_t and log_page_supported has 256 entries */
2845 	SPDK_STATIC_ASSERT(sizeof(ctrlr->log_page_supported) == 256, "log_page_supported size mismatch");
2846 	return ctrlr->log_page_supported[log_page];
2847 }
2848 
2849 bool
2850 spdk_nvme_ctrlr_is_feature_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature_code)
2851 {
2852 	/* No bounds check necessary, since feature_code is uint8_t and feature_supported has 256 entries */
2853 	SPDK_STATIC_ASSERT(sizeof(ctrlr->feature_supported) == 256, "feature_supported size mismatch");
2854 	return ctrlr->feature_supported[feature_code];
2855 }
2856 
2857 int
2858 spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
2859 			  struct spdk_nvme_ctrlr_list *payload)
2860 {
2861 	struct nvme_completion_poll_status	status;
2862 	int					res;
2863 	struct spdk_nvme_ns			*ns;
2864 
2865 	res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload,
2866 				       nvme_completion_poll_cb, &status);
2867 	if (res) {
2868 		return res;
2869 	}
2870 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2871 		SPDK_ERRLOG("spdk_nvme_ctrlr_attach_ns failed!\n");
2872 		return -ENXIO;
2873 	}
2874 
2875 	res = nvme_ctrlr_identify_active_ns(ctrlr);
2876 	if (res) {
2877 		return res;
2878 	}
2879 
2880 	ns = &ctrlr->ns[nsid - 1];
2881 	return nvme_ns_construct(ns, nsid, ctrlr);
2882 }
2883 
2884 int
2885 spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
2886 			  struct spdk_nvme_ctrlr_list *payload)
2887 {
2888 	struct nvme_completion_poll_status	status;
2889 	int					res;
2890 	struct spdk_nvme_ns			*ns;
2891 
2892 	res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload,
2893 				       nvme_completion_poll_cb, &status);
2894 	if (res) {
2895 		return res;
2896 	}
2897 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2898 		SPDK_ERRLOG("spdk_nvme_ctrlr_detach_ns failed!\n");
2899 		return -ENXIO;
2900 	}
2901 
2902 	res = nvme_ctrlr_identify_active_ns(ctrlr);
2903 	if (res) {
2904 		return res;
2905 	}
2906 
2907 	ns = &ctrlr->ns[nsid - 1];
2908 	/* Inactive NS */
2909 	nvme_ns_destruct(ns);
2910 
2911 	return 0;
2912 }
2913 
2914 uint32_t
2915 spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload)
2916 {
2917 	struct nvme_completion_poll_status	status;
2918 	int					res;
2919 	uint32_t				nsid;
2920 	struct spdk_nvme_ns			*ns;
2921 
2922 	res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, &status);
2923 	if (res) {
2924 		return 0;
2925 	}
2926 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2927 		SPDK_ERRLOG("spdk_nvme_ctrlr_create_ns failed!\n");
2928 		return 0;
2929 	}
2930 
2931 	nsid = status.cpl.cdw0;
2932 	ns = &ctrlr->ns[nsid - 1];
2933 	/* Inactive NS */
2934 	res = nvme_ns_construct(ns, nsid, ctrlr);
2935 	if (res) {
2936 		return 0;
2937 	}
2938 
2939 	/* Return the namespace ID that was created */
2940 	return nsid;
2941 }
2942 
2943 int
2944 spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2945 {
2946 	struct nvme_completion_poll_status	status;
2947 	int					res;
2948 	struct spdk_nvme_ns			*ns;
2949 
2950 	res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, &status);
2951 	if (res) {
2952 		return res;
2953 	}
2954 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2955 		SPDK_ERRLOG("spdk_nvme_ctrlr_delete_ns failed!\n");
2956 		return -ENXIO;
2957 	}
2958 
2959 	res = nvme_ctrlr_identify_active_ns(ctrlr);
2960 	if (res) {
2961 		return res;
2962 	}
2963 
2964 	ns = &ctrlr->ns[nsid - 1];
2965 	nvme_ns_destruct(ns);
2966 
2967 	return 0;
2968 }
2969 
2970 int
2971 spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
2972 		       struct spdk_nvme_format *format)
2973 {
2974 	struct nvme_completion_poll_status	status;
2975 	int					res;
2976 
2977 	res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb,
2978 				    &status);
2979 	if (res) {
2980 		return res;
2981 	}
2982 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
2983 		SPDK_ERRLOG("spdk_nvme_ctrlr_format failed!\n");
2984 		return -ENXIO;
2985 	}
2986 
2987 	return spdk_nvme_ctrlr_reset(ctrlr);
2988 }
2989 
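/*
 * Download the firmware image in min_page_size-sized chunks, then issue a
 * Firmware Commit for the requested slot and commit action, and finally reset
 * the controller to complete the activation.
 */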
2990 int
2991 spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, uint32_t size,
2992 				int slot, enum spdk_nvme_fw_commit_action commit_action, struct spdk_nvme_status *completion_status)
2993 {
2994 	struct spdk_nvme_fw_commit		fw_commit;
2995 	struct nvme_completion_poll_status	status;
2996 	int					res;
2997 	unsigned int				size_remaining;
2998 	unsigned int				offset;
2999 	unsigned int				transfer;
3000 	void					*p;
3001 
3002 	if (!completion_status) {
3003 		return -EINVAL;
3004 	}
3005 	memset(completion_status, 0, sizeof(struct spdk_nvme_status));
3006 	if (size % 4) {
3007 		SPDK_ERRLOG("spdk_nvme_ctrlr_update_firmware invalid size!\n");
3008 		return -1;
3009 	}
3010 
3011 	/* Currently, only SPDK_NVME_FW_COMMIT_REPLACE_IMG
3012 	 * and SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG are supported.
3013 	 */
3014 	if ((commit_action != SPDK_NVME_FW_COMMIT_REPLACE_IMG) &&
3015 	    (commit_action != SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG)) {
3016 		SPDK_ERRLOG("spdk_nvme_ctrlr_update_firmware invalid command!\n");
3017 		return -1;
3018 	}
3019 
3020 	/* Firmware download */
3021 	size_remaining = size;
3022 	offset = 0;
3023 	p = payload;
3024 
3025 	while (size_remaining > 0) {
3026 		transfer = spdk_min(size_remaining, ctrlr->min_page_size);
3027 
3028 		res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
3029 						       nvme_completion_poll_cb,
3030 						       &status);
3031 		if (res) {
3032 			return res;
3033 		}
3034 
3035 		if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
3036 			SPDK_ERRLOG("spdk_nvme_ctrlr_fw_image_download failed!\n");
3037 			return -ENXIO;
3038 		}
3039 		p += transfer;
3040 		offset += transfer;
3041 		size_remaining -= transfer;
3042 	}
3043 
3044 	/* Firmware commit */
3045 	memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
3046 	fw_commit.fs = slot;
3047 	fw_commit.ca = commit_action;
3048 
3049 	res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb,
3050 				       &status);
3051 	if (res) {
3052 		return res;
3053 	}
3054 
3055 	res = spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock);
3056 
3057 	memcpy(completion_status, &status.cpl.status, sizeof(struct spdk_nvme_status));
3058 
3059 	if (res) {
3060 		if (status.cpl.status.sct != SPDK_NVME_SCT_COMMAND_SPECIFIC ||
3061 		    status.cpl.status.sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) {
3062 			if (status.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC  &&
3063 			    status.cpl.status.sc == SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET) {
3064 				SPDK_NOTICELOG("Firmware activation requires a conventional reset to be performed.\n");
3065 			} else {
3066 				SPDK_ERRLOG("nvme_ctrlr_cmd_fw_commit failed!\n");
3067 			}
3068 			return -ENXIO;
3069 		}
3070 	}
3071 
3072 	return spdk_nvme_ctrlr_reset(ctrlr);
3073 }
3074 
3075 void *
3076 spdk_nvme_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
3077 {
3078 	void *buf;
3079 
3080 	if (size == 0) {
3081 		return NULL;
3082 	}
3083 
3084 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
3085 	buf = nvme_transport_ctrlr_alloc_cmb_io_buffer(ctrlr, size);
3086 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
3087 
3088 	return buf;
3089 }
3090 
3091 void
3092 spdk_nvme_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
3093 {
3094 	if (buf && size) {
3095 		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
3096 		nvme_transport_ctrlr_free_cmb_io_buffer(ctrlr, buf, size);
3097 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
3098 	}
3099 }
3100 
3101 bool
3102 spdk_nvme_ctrlr_is_discovery(struct spdk_nvme_ctrlr *ctrlr)
3103 {
3104 	assert(ctrlr);
3105 
3106 	return !strncmp(ctrlr->trid.subnqn, SPDK_NVMF_DISCOVERY_NQN,
3107 			strlen(SPDK_NVMF_DISCOVERY_NQN));
3108 }
3109 
3110 int
3111 spdk_nvme_ctrlr_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
3112 				 uint16_t spsp, uint8_t nssf, void *payload, size_t size)
3113 {
3114 	struct nvme_completion_poll_status	status;
3115 	int					res;
3116 
3117 	res = nvme_ctrlr_cmd_security_receive(ctrlr, secp, spsp, nssf, payload, size,
3118 					      nvme_completion_poll_cb, &status);
3119 	if (res) {
3120 		return res;
3121 	}
3122 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
3123 		SPDK_ERRLOG("spdk_nvme_ctrlr_security_receive failed!\n");
3124 		return -ENXIO;
3125 	}
3126 
3127 	return 0;
3128 }
3129 
3130 int
3131 spdk_nvme_ctrlr_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
3132 			      uint16_t spsp, uint8_t nssf, void *payload, size_t size)
3133 {
3134 	struct nvme_completion_poll_status	status;
3135 	int					res;
3136 
3137 	res = nvme_ctrlr_cmd_security_send(ctrlr, secp, spsp, nssf, payload, size, nvme_completion_poll_cb,
3138 					   &status);
3139 	if (res) {
3140 		return res;
3141 	}
3142 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
3143 		SPDK_ERRLOG("spdk_nvme_ctrlr_security_send failed!\n");
3144 		return -ENXIO;
3145 	}
3146 
3147 	return 0;
3148 }
3149 
3150 uint64_t
3151 spdk_nvme_ctrlr_get_flags(struct spdk_nvme_ctrlr *ctrlr)
3152 {
3153 	return ctrlr->flags;
3154 }
3155 
3156 const struct spdk_nvme_transport_id *
3157 spdk_nvme_ctrlr_get_transport_id(struct spdk_nvme_ctrlr *ctrlr)
3158 {
3159 	return &ctrlr->trid;
3160 }
3161