/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "nvme_internal.h"
#include "nvme_io_msg.h"

#include "spdk/env.h"
#include "spdk/string.h"

struct nvme_active_ns_ctx;

static void nvme_ctrlr_destruct_namespaces(struct spdk_nvme_ctrlr *ctrlr);
static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
		struct nvme_async_event_request *aer);
static void nvme_ctrlr_identify_active_ns_async(struct nvme_active_ns_ctx *ctx);
static int nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns);
static int nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns);

static int
nvme_ctrlr_get_cc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc)
{
	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
					      &cc->raw);
}

static int
nvme_ctrlr_get_csts(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts)
{
	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts.raw),
					      &csts->raw);
}

int
nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
{
	return nvme_transport_ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
					      &cap->raw);
}

int
nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs)
{
	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, vs.raw),
					      &vs->raw);
}

static int
nvme_ctrlr_set_cc(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cc_register *cc)
{
	return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
					      cc->raw);
}

int
nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz)
{
	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
					      &cmbsz->raw);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	char host_id_str[SPDK_UUID_STRING_LEN];

	assert(opts);

	memset(opts, 0, opts_size);

#define FIELD_OK(field) \
	offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size

	if (FIELD_OK(num_io_queues)) {
		opts->num_io_queues = DEFAULT_MAX_IO_QUEUES;
	}

	if (FIELD_OK(use_cmb_sqs)) {
		opts->use_cmb_sqs = true;
	}

	if (FIELD_OK(no_shn_notification)) {
		opts->no_shn_notification = false;
	}

	if (FIELD_OK(arb_mechanism)) {
		opts->arb_mechanism = SPDK_NVME_CC_AMS_RR;
	}

	if (FIELD_OK(keep_alive_timeout_ms)) {
		opts->keep_alive_timeout_ms = MIN_KEEP_ALIVE_TIMEOUT_IN_MS;
	}

	if (FIELD_OK(transport_retry_count)) {
		opts->transport_retry_count = SPDK_NVME_DEFAULT_RETRY_COUNT;
	}

	if (FIELD_OK(io_queue_size)) {
		opts->io_queue_size = DEFAULT_IO_QUEUE_SIZE;
	}

	if (FIELD_OK(io_queue_requests)) {
		opts->io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
	}

	if (FIELD_OK(host_id)) {
		memset(opts->host_id, 0, sizeof(opts->host_id));
	}

	if (nvme_driver_init() == 0) {
		if (FIELD_OK(extended_host_id)) {
			memcpy(opts->extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
			       sizeof(opts->extended_host_id));
		}

		if (FIELD_OK(hostnqn)) {
			spdk_uuid_fmt_lower(host_id_str, sizeof(host_id_str),
					    &g_spdk_nvme_driver->default_extended_host_id);
			snprintf(opts->hostnqn, sizeof(opts->hostnqn), "2014-08.org.nvmexpress:uuid:%s", host_id_str);
		}
	}

	if (FIELD_OK(src_addr)) {
		memset(opts->src_addr, 0, sizeof(opts->src_addr));
	}

	if (FIELD_OK(src_svcid)) {
		memset(opts->src_svcid, 0, sizeof(opts->src_svcid));
	}

	if (FIELD_OK(command_set)) {
		opts->command_set = SPDK_NVME_CC_CSS_NVM;
	}

	if (FIELD_OK(admin_timeout_ms)) {
		opts->admin_timeout_ms = NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000;
	}

	if (FIELD_OK(header_digest)) {
		opts->header_digest = false;
	}

	if (FIELD_OK(data_digest)) {
		opts->data_digest = false;
	}

	if (FIELD_OK(disable_error_logging)) {
		opts->disable_error_logging = false;
	}

	if (FIELD_OK(transport_ack_timeout)) {
		opts->transport_ack_timeout = SPDK_NVME_DEFAULT_TRANSPORT_ACK_TIMEOUT;
	}

	if (FIELD_OK(admin_queue_size)) {
		opts->admin_queue_size = DEFAULT_ADMIN_QUEUE_SIZE;
	}
#undef FIELD_OK
}
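
/*
 * Illustrative sketch (not part of this file): a caller binds itself to a
 * specific version of the opts struct by passing sizeof(opts), so fields
 * added in newer library versions are simply left at their memset() zero
 * value instead of being read out of bounds. The function and struct names
 * match the public API; the surrounding code is hypothetical.
 *
 *	struct spdk_nvme_ctrlr_opts opts;
 *	struct spdk_nvme_ctrlr *ctrlr;
 *
 *	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
 *	opts.num_io_queues = 4;
 *	ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
 */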

/**
 * This function will be called when the process allocates the IO qpair.
 * Note: the ctrlr_lock must be held when calling this function.
 */
static void
nvme_ctrlr_proc_add_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr_process	*active_proc;
	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;

	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
	if (active_proc) {
		TAILQ_INSERT_TAIL(&active_proc->allocated_io_qpairs, qpair, per_process_tailq);
		qpair->active_proc = active_proc;
	}
}

/**
 * This function will be called when the process frees the IO qpair.
 * Note: the ctrlr_lock must be held when calling this function.
 */
static void
nvme_ctrlr_proc_remove_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr_process	*active_proc;
	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
	struct spdk_nvme_qpair          *active_qpair, *tmp_qpair;

	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
	if (!active_proc) {
		return;
	}

	TAILQ_FOREACH_SAFE(active_qpair, &active_proc->allocated_io_qpairs,
			   per_process_tailq, tmp_qpair) {
		if (active_qpair == qpair) {
			TAILQ_REMOVE(&active_proc->allocated_io_qpairs,
				     active_qpair, per_process_tailq);

			break;
		}
	}
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts,
		size_t opts_size)
{
	assert(ctrlr);

	assert(opts);

	memset(opts, 0, opts_size);

#define FIELD_OK(field) \
	offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(opts->field) <= opts_size

	if (FIELD_OK(qprio)) {
		opts->qprio = SPDK_NVME_QPRIO_URGENT;
	}

	if (FIELD_OK(io_queue_size)) {
		opts->io_queue_size = ctrlr->opts.io_queue_size;
	}

	if (FIELD_OK(io_queue_requests)) {
		opts->io_queue_requests = ctrlr->opts.io_queue_requests;
	}

	if (FIELD_OK(delay_cmd_submit)) {
		opts->delay_cmd_submit = false;
	}

	if (FIELD_OK(sq.vaddr)) {
		opts->sq.vaddr = NULL;
	}

	if (FIELD_OK(sq.paddr)) {
		opts->sq.paddr = 0;
	}

	if (FIELD_OK(sq.buffer_size)) {
		opts->sq.buffer_size = 0;
	}

	if (FIELD_OK(cq.vaddr)) {
		opts->cq.vaddr = NULL;
	}

	if (FIELD_OK(cq.paddr)) {
		opts->cq.paddr = 0;
	}

	if (FIELD_OK(cq.buffer_size)) {
		opts->cq.buffer_size = 0;
	}

	if (FIELD_OK(create_only)) {
		opts->create_only = false;
	}

#undef FIELD_OK
}

static struct spdk_nvme_qpair *
nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			   const struct spdk_nvme_io_qpair_opts *opts)
{
	uint32_t				qid;
	struct spdk_nvme_qpair			*qpair;
	union spdk_nvme_cc_register		cc;

	if (!ctrlr) {
		return NULL;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
		SPDK_ERRLOG("get_cc failed\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	if (opts->qprio & ~SPDK_NVME_CREATE_IO_SQ_QPRIO_MASK) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	/*
	 * Only the value SPDK_NVME_QPRIO_URGENT (0) is valid for the
	 * default round robin arbitration method.
	 */
	if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts->qprio != SPDK_NVME_QPRIO_URGENT)) {
		SPDK_ERRLOG("invalid queue priority for default round robin arbitration method\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	/*
	 * Get the first available I/O queue ID.
	 */
	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
	if (qid > ctrlr->opts.num_io_queues) {
		SPDK_ERRLOG("No free I/O queue IDs\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, opts);
	if (qpair == NULL) {
		SPDK_ERRLOG("nvme_transport_ctrlr_create_io_qpair() failed\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	nvme_ctrlr_proc_add_io_qpair(qpair);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return qpair;
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	int rc;

	if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTED) {
		return -EISCONN;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC) {
		spdk_delay_us(100);
	}

	return rc;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{

	struct spdk_nvme_qpair		*qpair;
	struct spdk_nvme_io_qpair_opts	opts;
	int				rc;

	/*
	 * Get the default options, then overwrite them with the user-provided options
	 * up to opts_size.
	 *
	 * This allows for extensions of the opts structure without breaking
	 * ABI compatibility.
	 */
	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	if (user_opts) {
		memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size));

		/* If user passes buffers, make sure they're big enough for the requested queue size */
		if (opts.sq.vaddr) {
			if (opts.sq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cmd))) {
				SPDK_ERRLOG("sq buffer size %lx is too small for sq size %lx\n",
					    opts.sq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cmd)));
				return NULL;
			}
		}
		if (opts.cq.vaddr) {
			if (opts.cq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cpl))) {
				SPDK_ERRLOG("cq buffer size %lx is too small for cq size %lx\n",
					    opts.cq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cpl)));
				return NULL;
			}
		}
	}

	qpair = nvme_ctrlr_create_io_qpair(ctrlr, &opts);

	if (qpair == NULL || opts.create_only == true) {
		return qpair;
	}

	rc = spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("nvme_transport_ctrlr_connect_io_qpair() failed\n");
		nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair);
		return NULL;
	}

	return qpair;
}
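
/*
 * Illustrative sketch (not part of this file): typical qpair allocation.
 * With create_only = true the qpair is returned disconnected and the caller
 * connects it later, e.g. after adding it to a poll group. The variable
 * names below are hypothetical.
 *
 *	struct spdk_nvme_io_qpair_opts opts;
 *	struct spdk_nvme_qpair *qpair;
 *
 *	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
 *	opts.create_only = true;
 *	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
 *	if (qpair != NULL) {
 *		rc = spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);
 *	}
 */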

int
spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr;
	int rc;

	assert(qpair != NULL);
	assert(nvme_qpair_is_admin_queue(qpair) == false);
	assert(qpair->ctrlr != NULL);

	ctrlr = qpair->ctrlr;
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	if (ctrlr->is_removed) {
		rc = -ENODEV;
		goto out;
	}

	if (ctrlr->is_resetting) {
		rc = -EAGAIN;
		goto out;
	}

	if (ctrlr->is_failed) {
		rc = -ENXIO;
		goto out;
	}

	if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTED) {
		rc = 0;
		goto out;
	}

	rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
	if (rc) {
		rc = -EAGAIN;
		goto out;
	}

out:
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}
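
/*
 * Illustrative sketch (not part of this file): -EAGAIN from the reconnect
 * means the controller is resetting or the connect attempt failed
 * transiently, so callers commonly retry on a later poll; -ENODEV and
 * -ENXIO are fatal for the qpair. The retry policy below is hypothetical.
 *
 *	rc = spdk_nvme_ctrlr_reconnect_io_qpair(qpair);
 *	if (rc == -EAGAIN) {
 *		// try again on the next poll
 *	} else if (rc < 0) {
 *		spdk_nvme_ctrlr_free_io_qpair(qpair);
 *	}
 */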

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->adminq->transport_failure_reason;
}

/*
 * This internal function will attempt to take the controller
 * lock before calling disconnect on a controller qpair.
 * Functions already holding the controller lock should
 * call nvme_transport_ctrlr_disconnect_qpair directly.
 */
void
nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

	assert(ctrlr != NULL);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr;

	if (qpair == NULL) {
		return 0;
	}

	ctrlr = qpair->ctrlr;

	if (qpair->in_completion_context) {
		/*
		 * There are many cases where it is convenient to delete an io qpair in the context
		 *  of that qpair's completion routine.  To handle this properly, set a flag here
		 *  so that the completion routine will perform an actual delete after the context
		 *  unwinds.
		 */
		qpair->delete_after_completion_context = 1;
		return 0;
	}

	if (qpair->poll_group && qpair->poll_group->in_completion_context) {
		/* Same as above, but in a poll group. */
		qpair->poll_group->num_qpairs_to_delete++;
		qpair->delete_after_completion_context = 1;
		return 0;
	}

	if (qpair->poll_group) {
		spdk_nvme_poll_group_remove(qpair->poll_group->group, qpair);
	}

	/* Do not retry. */
	nvme_qpair_set_state(qpair, NVME_QPAIR_DESTROYING);
	nvme_qpair_abort_reqs(qpair, 1);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	nvme_ctrlr_proc_remove_io_qpair(qpair);

	TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
	spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);

	if (nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair)) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -1;
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return 0;
}

static void
nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_intel_log_page_directory *log_page_directory)
{
	if (log_page_directory == NULL) {
		return;
	}

	if (ctrlr->cdata.vid != SPDK_PCI_VID_INTEL) {
		return;
	}

	ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY] = true;

	if (log_page_directory->read_latency_log_len ||
	    (ctrlr->quirks & NVME_INTEL_QUIRK_READ_LATENCY)) {
		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] = true;
	}
	if (log_page_directory->write_latency_log_len ||
	    (ctrlr->quirks & NVME_INTEL_QUIRK_WRITE_LATENCY)) {
		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] = true;
	}
	if (log_page_directory->temperature_statistics_log_len) {
		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] = true;
	}
	if (log_page_directory->smart_log_len) {
		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_SMART] = true;
	}
	if (log_page_directory->marketing_description_log_len) {
		ctrlr->log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] = true;
	}
}

static int
nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc = 0;
	struct nvme_completion_poll_status	*status;
	struct spdk_nvme_intel_log_page_directory *log_page_directory;

	log_page_directory = spdk_zmalloc(sizeof(struct spdk_nvme_intel_log_page_directory),
					  64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (log_page_directory == NULL) {
		SPDK_ERRLOG("could not allocate log_page_directory\n");
		return -ENXIO;
	}

	status = calloc(1, sizeof(*status));
	if (!status) {
		SPDK_ERRLOG("Failed to allocate status tracker\n");
		spdk_free(log_page_directory);
		return -ENOMEM;
	}

	rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
					      SPDK_NVME_GLOBAL_NS_TAG, log_page_directory,
					      sizeof(struct spdk_nvme_intel_log_page_directory),
					      0, nvme_completion_poll_cb, status);
	if (rc != 0) {
		spdk_free(log_page_directory);
		free(status);
		return rc;
	}

	if (spdk_nvme_wait_for_completion_timeout(ctrlr->adminq, status,
			ctrlr->opts.admin_timeout_ms / 1000)) {
		spdk_free(log_page_directory);
		SPDK_WARNLOG("Intel log pages not supported on Intel drive!\n");
		if (!status->timed_out) {
			free(status);
		}
		return 0;
	}

	nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, log_page_directory);
	spdk_free(log_page_directory);
	free(status);
	return 0;
}

static int
nvme_ctrlr_set_supported_log_pages(struct spdk_nvme_ctrlr *ctrlr)
{
	int	rc = 0;

	memset(ctrlr->log_page_supported, 0, sizeof(ctrlr->log_page_supported));
	/* Mandatory pages */
	ctrlr->log_page_supported[SPDK_NVME_LOG_ERROR] = true;
	ctrlr->log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] = true;
	ctrlr->log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] = true;
	if (ctrlr->cdata.lpa.celp) {
		ctrlr->log_page_supported[SPDK_NVME_LOG_COMMAND_EFFECTS_LOG] = true;
	}
	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL && !(ctrlr->quirks & NVME_INTEL_QUIRK_NO_LOG_PAGES)) {
		rc = nvme_ctrlr_set_intel_support_log_pages(ctrlr);
	}

	return rc;
}

static void
nvme_ctrlr_set_intel_supported_features(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_MAX_LBA] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_NATIVE_MAX_LBA] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_POWER_GOVERNOR_SETTING] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_SMBUS_ADDRESS] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LED_PATTERN] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_RESET_TIMED_WORKLOAD_COUNTERS] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING] = true;
}

static void
nvme_ctrlr_set_arbitration_feature(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t cdw11;
	struct nvme_completion_poll_status *status;

	if (ctrlr->opts.arbitration_burst == 0) {
		return;
	}

	if (ctrlr->opts.arbitration_burst > 7) {
		SPDK_WARNLOG("Valid arbitration burst values are 0-7\n");
		return;
	}

	status = calloc(1, sizeof(*status));
	if (!status) {
		SPDK_ERRLOG("Failed to allocate status tracker\n");
		return;
	}

	cdw11 = ctrlr->opts.arbitration_burst;

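	/*
	 * Arbitration feature cdw11 layout per the NVMe spec: Arbitration
	 * Burst in bits 2:0, then Low/Medium/High Priority Weight in bits
	 * 15:8, 23:16 and 31:24, which is what the shifts below assemble.
	 */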
	if (spdk_nvme_ctrlr_get_flags(ctrlr) & SPDK_NVME_CTRLR_WRR_SUPPORTED) {
		cdw11 |= (uint32_t)ctrlr->opts.low_priority_weight << 8;
		cdw11 |= (uint32_t)ctrlr->opts.medium_priority_weight << 16;
		cdw11 |= (uint32_t)ctrlr->opts.high_priority_weight << 24;
	}

	if (spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ARBITRATION,
					    cdw11, 0, NULL, 0,
					    nvme_completion_poll_cb, status) < 0) {
		SPDK_ERRLOG("Set arbitration feature failed\n");
		free(status);
		return;
	}

	if (spdk_nvme_wait_for_completion_timeout(ctrlr->adminq, status,
			ctrlr->opts.admin_timeout_ms / 1000)) {
		SPDK_ERRLOG("Timed out setting arbitration feature\n");
	}

	if (!status->timed_out) {
		free(status);
	}
}

static void
nvme_ctrlr_set_supported_features(struct spdk_nvme_ctrlr *ctrlr)
{
	memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported));
	/* Mandatory features */
	ctrlr->feature_supported[SPDK_NVME_FEAT_ARBITRATION] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_POWER_MANAGEMENT] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_ERROR_RECOVERY] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_NUMBER_OF_QUEUES] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_COALESCING] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_WRITE_ATOMICITY] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION] = true;
	/* Optional features */
	if (ctrlr->cdata.vwc.present) {
		ctrlr->feature_supported[SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE] = true;
	}
	if (ctrlr->cdata.apsta.supported) {
		ctrlr->feature_supported[SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION] = true;
	}
	if (ctrlr->cdata.hmpre) {
		ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_MEM_BUFFER] = true;
	}
	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) {
		nvme_ctrlr_set_intel_supported_features(ctrlr);
	}

	nvme_ctrlr_set_arbitration_feature(ctrlr);
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
	/*
	 * Set the flag here and leave the actual failing of the qpairs to
	 * spdk_nvme_qpair_process_completions().
	 */
	if (hot_remove) {
		ctrlr->is_removed = true;
	}
	ctrlr->is_failed = true;
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
	SPDK_ERRLOG("ctrlr %s in failed state.\n", ctrlr->trid.traddr);
}

/**
 * This public API function will try to take the controller lock.
 * Any private functions being called from a thread already holding
 * the ctrlr lock should call nvme_ctrlr_fail directly.
 */
void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_fail(ctrlr, false);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
}

static void
nvme_ctrlr_shutdown(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_cc_register	cc;
	union spdk_nvme_csts_register	csts;
	uint32_t			ms_waited = 0;
	uint32_t			shutdown_timeout_ms;

	if (ctrlr->is_removed) {
		return;
	}

	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
		SPDK_ERRLOG("get_cc() failed\n");
		return;
	}

	cc.bits.shn = SPDK_NVME_SHN_NORMAL;

	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
		SPDK_ERRLOG("set_cc() failed\n");
		return;
	}

	/*
	 * The NVMe specification defines RTD3E to be the time between
	 *  setting SHN = 1 until the controller will set SHST = 10b.
	 * If the device doesn't report RTD3 entry latency, or if it
	 *  reports RTD3 entry latency less than 10 seconds, pick
	 *  10 seconds as a reasonable amount of time to
	 *  wait before proceeding.
	 */
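	/*
	 * Worked example: RTD3E = 8,000,000 us rounds up to 8,000 ms below,
	 * which is under the 10,000 ms floor, so the loop would wait up to
	 * 10 seconds for SHST_COMPLETE.
	 */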
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "RTD3E = %" PRIu32 " us\n", ctrlr->cdata.rtd3e);
	shutdown_timeout_ms = (ctrlr->cdata.rtd3e + 999) / 1000;
	shutdown_timeout_ms = spdk_max(shutdown_timeout_ms, 10000);
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "shutdown timeout = %" PRIu32 " ms\n", shutdown_timeout_ms);

	do {
		if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
			SPDK_ERRLOG("get_csts() failed\n");
			return;
		}

		if (csts.bits.shst == SPDK_NVME_SHST_COMPLETE) {
			SPDK_DEBUGLOG(SPDK_LOG_NVME, "shutdown complete in %u milliseconds\n",
				      ms_waited);
			return;
		}

		nvme_delay(1000);
		ms_waited++;
	} while (ms_waited < shutdown_timeout_ms);

	SPDK_ERRLOG("did not shutdown within %u milliseconds\n", shutdown_timeout_ms);
	if (ctrlr->quirks & NVME_QUIRK_SHST_COMPLETE) {
		SPDK_ERRLOG("likely due to shutdown handling in the VMware emulated NVMe SSD\n");
	}
}

static int
nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_cc_register	cc;
	int				rc;

	rc = nvme_transport_ctrlr_enable(ctrlr);
	if (rc != 0) {
		SPDK_ERRLOG("transport ctrlr_enable failed\n");
		return rc;
	}

	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
		SPDK_ERRLOG("get_cc() failed\n");
		return -EIO;
	}

	if (cc.bits.en != 0) {
		SPDK_ERRLOG("called with CC.EN = 1\n");
		return -EINVAL;
	}

	cc.bits.en = 1;
	cc.bits.css = 0;
	cc.bits.shn = 0;
	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

	/* Page size is 2 ^ (12 + mps). */
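	/* e.g. a 4 KiB page size gives log2(4096) - 12 = 0, the minimum MPS value. */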
	cc.bits.mps = spdk_u32log2(ctrlr->page_size) - 12;

	if (ctrlr->cap.bits.css == 0) {
		SPDK_INFOLOG(SPDK_LOG_NVME,
			     "Drive reports no command sets supported. Assuming NVM is supported.\n");
		ctrlr->cap.bits.css = SPDK_NVME_CAP_CSS_NVM;
	}

	if (!(ctrlr->cap.bits.css & (1u << ctrlr->opts.command_set))) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Requested I/O command set %u but supported mask is 0x%x\n",
			      ctrlr->opts.command_set, ctrlr->cap.bits.css);
		return -EINVAL;
	}

	cc.bits.css = ctrlr->opts.command_set;

	switch (ctrlr->opts.arb_mechanism) {
	case SPDK_NVME_CC_AMS_RR:
		break;
	case SPDK_NVME_CC_AMS_WRR:
		if (SPDK_NVME_CAP_AMS_WRR & ctrlr->cap.bits.ams) {
			break;
		}
		return -EINVAL;
	case SPDK_NVME_CC_AMS_VS:
		if (SPDK_NVME_CAP_AMS_VS & ctrlr->cap.bits.ams) {
			break;
		}
		return -EINVAL;
	default:
		return -EINVAL;
	}

	cc.bits.ams = ctrlr->opts.arb_mechanism;

	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
		SPDK_ERRLOG("set_cc() failed\n");
		return -EIO;
	}

	return 0;
}

static int
nvme_ctrlr_disable(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_cc_register	cc;

	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
		SPDK_ERRLOG("get_cc() failed\n");
		return -EIO;
	}

	if (cc.bits.en == 0) {
		return 0;
	}

	cc.bits.en = 0;

	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
		SPDK_ERRLOG("set_cc() failed\n");
		return -EIO;
	}

	return 0;
}

#ifdef DEBUG
static const char *
nvme_ctrlr_state_string(enum nvme_ctrlr_state state)
{
	switch (state) {
	case NVME_CTRLR_STATE_INIT_DELAY:
		return "delay init";
	case NVME_CTRLR_STATE_INIT:
		return "init";
	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
		return "disable and wait for CSTS.RDY = 1";
	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
		return "disable and wait for CSTS.RDY = 0";
	case NVME_CTRLR_STATE_ENABLE:
		return "enable controller by writing CC.EN = 1";
	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
		return "wait for CSTS.RDY = 1";
	case NVME_CTRLR_STATE_RESET_ADMIN_QUEUE:
		return "reset admin queue";
	case NVME_CTRLR_STATE_IDENTIFY:
		return "identify controller";
	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
		return "wait for identify controller";
	case NVME_CTRLR_STATE_SET_NUM_QUEUES:
		return "set number of queues";
	case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
		return "wait for set number of queues";
	case NVME_CTRLR_STATE_CONSTRUCT_NS:
		return "construct namespaces";
	case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
		return "identify active ns";
	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS:
		return "wait for identify active ns";
	case NVME_CTRLR_STATE_IDENTIFY_NS:
		return "identify ns";
	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
		return "wait for identify ns";
	case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
		return "identify namespace id descriptors";
	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
		return "wait for identify namespace id descriptors";
	case NVME_CTRLR_STATE_CONFIGURE_AER:
		return "configure AER";
	case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
		return "wait for configure aer";
	case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
		return "set supported log pages";
	case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
		return "set supported features";
	case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
		return "set doorbell buffer config";
	case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
		return "wait for doorbell buffer config";
	case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
		return "set keep alive timeout";
	case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
		return "wait for set keep alive timeout";
	case NVME_CTRLR_STATE_SET_HOST_ID:
		return "set host ID";
	case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
		return "wait for set host ID";
	case NVME_CTRLR_STATE_READY:
		return "ready";
	case NVME_CTRLR_STATE_ERROR:
		return "error";
	}
	return "unknown";
}
#endif /* DEBUG */

static void
nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
		     uint64_t timeout_in_ms)
{
	uint64_t ticks_per_ms, timeout_in_ticks, now_ticks;

	ctrlr->state = state;
	if (timeout_in_ms == NVME_TIMEOUT_INFINITE) {
		goto inf;
	}

	ticks_per_ms = spdk_get_ticks_hz() / 1000;
	if (timeout_in_ms > UINT64_MAX / ticks_per_ms) {
		SPDK_ERRLOG("Specified timeout would cause integer overflow. Defaulting to no timeout.\n");
		goto inf;
	}

	now_ticks = spdk_get_ticks();
	timeout_in_ticks = timeout_in_ms * ticks_per_ms;
	if (timeout_in_ticks > UINT64_MAX - now_ticks) {
		SPDK_ERRLOG("Specified timeout would cause integer overflow. Defaulting to no timeout.\n");
		goto inf;
	}

	ctrlr->state_timeout_tsc = timeout_in_ticks + now_ticks;
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "setting state to %s (timeout %" PRIu64 " ms)\n",
		      nvme_ctrlr_state_string(ctrlr->state), timeout_in_ms);
	return;
inf:
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "setting state to %s (no timeout)\n",
		      nvme_ctrlr_state_string(ctrlr->state));
	ctrlr->state_timeout_tsc = NVME_TIMEOUT_INFINITE;
}

static void
nvme_ctrlr_free_doorbell_buffer(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->shadow_doorbell) {
		spdk_free(ctrlr->shadow_doorbell);
		ctrlr->shadow_doorbell = NULL;
	}

	if (ctrlr->eventidx) {
		spdk_free(ctrlr->eventidx);
		ctrlr->eventidx = NULL;
	}
}

static void
nvme_ctrlr_set_doorbell_buffer_config_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_WARNLOG("Doorbell buffer config failed\n");
	} else {
		SPDK_INFOLOG(SPDK_LOG_NVME, "NVMe controller: %s doorbell buffer config enabled\n",
			     ctrlr->trid.traddr);
	}
	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
			     ctrlr->opts.admin_timeout_ms);
}

static int
nvme_ctrlr_set_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc = 0;
	uint64_t prp1, prp2, len;

	if (!ctrlr->cdata.oacs.doorbell_buffer_config) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
				     ctrlr->opts.admin_timeout_ms);
		return 0;
	}

	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
				     ctrlr->opts.admin_timeout_ms);
		return 0;
	}

	/* only 1 page size for doorbell buffer */
	ctrlr->shadow_doorbell = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size,
					      NULL, SPDK_ENV_LCORE_ID_ANY,
					      SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
	if (ctrlr->shadow_doorbell == NULL) {
		rc = -ENOMEM;
		goto error;
	}

	len = ctrlr->page_size;
	prp1 = spdk_vtophys(ctrlr->shadow_doorbell, &len);
	if (prp1 == SPDK_VTOPHYS_ERROR || len != ctrlr->page_size) {
		rc = -EFAULT;
		goto error;
	}

	ctrlr->eventidx = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size,
				       NULL, SPDK_ENV_LCORE_ID_ANY,
				       SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
	if (ctrlr->eventidx == NULL) {
		rc = -ENOMEM;
		goto error;
	}

	len = ctrlr->page_size;
	prp2 = spdk_vtophys(ctrlr->eventidx, &len);
	if (prp2 == SPDK_VTOPHYS_ERROR || len != ctrlr->page_size) {
		rc = -EFAULT;
		goto error;
	}

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
			     ctrlr->opts.admin_timeout_ms);

	rc = nvme_ctrlr_cmd_doorbell_buffer_config(ctrlr, prp1, prp2,
			nvme_ctrlr_set_doorbell_buffer_config_done, ctrlr);
	if (rc != 0) {
		goto error;
	}

	return 0;

error:
	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
	nvme_ctrlr_free_doorbell_buffer(ctrlr);
	return rc;
}

int
spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc = 0;
	struct spdk_nvme_qpair	*qpair;
	struct nvme_request	*req, *tmp;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	if (ctrlr->is_resetting || ctrlr->is_removed) {
		/*
		 * Controller is already resetting or has been removed. Return
		 *  immediately since there is no need to kick off another
		 *  reset in these cases.
		 */
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return ctrlr->is_resetting ? 0 : -ENXIO;
	}

	ctrlr->is_resetting = true;
	ctrlr->is_failed = false;

	SPDK_NOTICELOG("resetting controller\n");

	/* Free all of the queued abort requests */
	STAILQ_FOREACH_SAFE(req, &ctrlr->queued_aborts, stailq, tmp) {
		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
		nvme_free_request(req);
		ctrlr->outstanding_aborts--;
	}

	nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);

	/* Disable all queues before disabling the controller hardware. */
	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
		qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
	}

	ctrlr->adminq->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
	if (nvme_transport_ctrlr_connect_qpair(ctrlr, ctrlr->adminq) != 0) {
		SPDK_ERRLOG("Controller reinitialization failed.\n");
		rc = -1;
		goto out;
	}

	/* Doorbell buffer config is invalid during reset */
	nvme_ctrlr_free_doorbell_buffer(ctrlr);

	/* Set the state back to INIT to cause a full hardware reset. */
	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);

	nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_ENABLED);
	while (ctrlr->state != NVME_CTRLR_STATE_READY) {
		if (nvme_ctrlr_process_init(ctrlr) != 0) {
			SPDK_ERRLOG("controller reinitialization failed\n");
			rc = -1;
			break;
		}
	}

	/*
	 * For PCIe controllers, the memory locations of the transport qpair
	 * don't change when the controller is reset. They simply need to be
	 * re-enabled with admin commands to the controller. For fabric
	 * controllers we need to disconnect and reconnect the qpair on its
	 * own thread outside of the context of the reset.
	 */
	if (rc == 0 && ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
		/* Reinitialize qpairs */
		TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
			if (nvme_transport_ctrlr_connect_qpair(ctrlr, qpair) != 0) {
				qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
				rc = -1;
				continue;
			}
		}
	}

out:
	if (rc) {
		nvme_ctrlr_fail(ctrlr, false);
	}
	ctrlr->is_resetting = false;

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
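
/*
 * Illustrative sketch (not part of this file): a common recovery pattern is
 * to reset a failed controller and then reconnect any fabrics qpairs from
 * their owning threads, since only PCIe qpairs are reconnected inside the
 * reset above. The loop below is hypothetical.
 *
 *	if (spdk_nvme_ctrlr_is_failed(ctrlr)) {
 *		if (spdk_nvme_ctrlr_reset(ctrlr) == 0) {
 *			// for fabrics transports, call
 *			// spdk_nvme_ctrlr_reconnect_io_qpair(qpair) on each
 *			// qpair from its own thread
 *		}
 *	}
 */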

int
spdk_nvme_ctrlr_set_trid(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_transport_id *trid)
{
	int rc = 0;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	if (ctrlr->is_failed == false) {
		rc = -EPERM;
		goto out;
	}

	if (trid->trtype != ctrlr->trid.trtype) {
		rc = -EINVAL;
		goto out;
	}

	if (strncmp(trid->subnqn, ctrlr->trid.subnqn, SPDK_NVMF_NQN_MAX_LEN)) {
		rc = -EINVAL;
		goto out;
	}

	ctrlr->trid = *trid;

out:
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

static void
nvme_ctrlr_identify_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_ERRLOG("nvme_identify_controller failed!\n");
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return;
	}

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 *  controller supports.
	 */
	ctrlr->max_xfer_size = nvme_transport_ctrlr_get_max_xfer_size(ctrlr);
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "transport max_xfer_size %u\n", ctrlr->max_xfer_size);
	if (ctrlr->cdata.mdts > 0) {
		ctrlr->max_xfer_size = spdk_min(ctrlr->max_xfer_size,
						ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size);
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "CNTLID 0x%04" PRIx16 "\n", ctrlr->cdata.cntlid);
	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
		ctrlr->cntlid = ctrlr->cdata.cntlid;
	} else {
		/*
		 * Fabrics controllers should already have CNTLID from the Connect command.
		 *
		 * If CNTLID from Connect doesn't match CNTLID in the Identify Controller data,
		 * trust the one from Connect.
		 */
		if (ctrlr->cntlid != ctrlr->cdata.cntlid) {
			SPDK_DEBUGLOG(SPDK_LOG_NVME,
				      "Identify CNTLID 0x%04" PRIx16 " != Connect CNTLID 0x%04" PRIx16 "\n",
				      ctrlr->cdata.cntlid, ctrlr->cntlid);
		}
	}

	if (ctrlr->cdata.sgls.supported) {
		assert(ctrlr->cdata.sgls.supported != 0x3);
		ctrlr->flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
		if (ctrlr->cdata.sgls.supported == 0x2) {
			ctrlr->flags |= SPDK_NVME_CTRLR_SGL_REQUIRES_DWORD_ALIGNMENT;
		}
		/*
		 * Use MSDBD to ensure our max_sges doesn't exceed what the
		 *  controller supports.
		 */
		ctrlr->max_sges = nvme_transport_ctrlr_get_max_sges(ctrlr);
		if (ctrlr->cdata.nvmf_specific.msdbd != 0) {
			ctrlr->max_sges = spdk_min(ctrlr->cdata.nvmf_specific.msdbd, ctrlr->max_sges);
		} else {
			/* A value 0 indicates no limit. */
		}
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "transport max_sges %u\n", ctrlr->max_sges);
	}

	if (ctrlr->cdata.oacs.security) {
		ctrlr->flags |= SPDK_NVME_CTRLR_SECURITY_SEND_RECV_SUPPORTED;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "fuses compare and write: %d\n", ctrlr->cdata.fuses.compare_and_write);
	if (ctrlr->cdata.fuses.compare_and_write) {
		ctrlr->flags |= SPDK_NVME_CTRLR_COMPARE_AND_WRITE_SUPPORTED;
	}

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
			     ctrlr->opts.admin_timeout_ms);
}

static int
nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
{
	int	rc;

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
			     ctrlr->opts.admin_timeout_ms);

	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
				     &ctrlr->cdata, sizeof(ctrlr->cdata),
				     nvme_ctrlr_identify_done, ctrlr);
	if (rc != 0) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return rc;
	}

	return 0;
}

enum nvme_active_ns_state {
	NVME_ACTIVE_NS_STATE_IDLE,
	NVME_ACTIVE_NS_STATE_PROCESSING,
	NVME_ACTIVE_NS_STATE_DONE,
	NVME_ACTIVE_NS_STATE_ERROR
};

typedef void (*nvme_active_ns_ctx_deleter)(struct nvme_active_ns_ctx *);

struct nvme_active_ns_ctx {
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t page;
	uint32_t num_pages;
	uint32_t next_nsid;
	uint32_t *new_ns_list;
	nvme_active_ns_ctx_deleter deleter;

	enum nvme_active_ns_state state;
};

static struct nvme_active_ns_ctx *
nvme_active_ns_ctx_create(struct spdk_nvme_ctrlr *ctrlr, nvme_active_ns_ctx_deleter deleter)
{
	struct nvme_active_ns_ctx *ctx;
	uint32_t num_pages = 0;
	uint32_t *new_ns_list = NULL;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		SPDK_ERRLOG("Failed to allocate nvme_active_ns_ctx!\n");
		return NULL;
	}

	if (ctrlr->num_ns) {
		/* The allocated size must be a multiple of sizeof(struct spdk_nvme_ns_list) */
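		/*
		 * Worked example: each spdk_nvme_ns_list holds 1024 32-bit NSIDs
		 * (4096 bytes), so num_ns = 1024 gives (4096 - 1) / 4096 + 1 = 1
		 * page, and num_ns = 1025 gives 2.
		 */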
		num_pages = (ctrlr->num_ns * sizeof(new_ns_list[0]) - 1) / sizeof(struct spdk_nvme_ns_list) + 1;
		new_ns_list = spdk_zmalloc(num_pages * sizeof(struct spdk_nvme_ns_list), ctrlr->page_size,
					   NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
		if (!new_ns_list) {
			SPDK_ERRLOG("Failed to allocate active_ns_list!\n");
			free(ctx);
			return NULL;
		}
	}

	ctx->num_pages = num_pages;
	ctx->new_ns_list = new_ns_list;
	ctx->ctrlr = ctrlr;
	ctx->deleter = deleter;

	return ctx;
}

static void
nvme_active_ns_ctx_destroy(struct nvme_active_ns_ctx *ctx)
{
	spdk_free(ctx->new_ns_list);
	free(ctx);
}

static void
nvme_ctrlr_identify_active_ns_swap(struct spdk_nvme_ctrlr *ctrlr, uint32_t **new_ns_list)
{
	spdk_free(ctrlr->active_ns_list);
	ctrlr->active_ns_list = *new_ns_list;
	*new_ns_list = NULL;
}

static void
nvme_ctrlr_identify_active_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_active_ns_ctx *ctx = arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
		goto out;
	}

	ctx->next_nsid = ctx->new_ns_list[1024 * ctx->page + 1023];
	if (ctx->next_nsid == 0 || ++ctx->page == ctx->num_pages) {
		ctx->state = NVME_ACTIVE_NS_STATE_DONE;
		goto out;
	}

	nvme_ctrlr_identify_active_ns_async(ctx);
	return;

out:
	if (ctx->deleter) {
		ctx->deleter(ctx);
	}
}

static void
nvme_ctrlr_identify_active_ns_async(struct nvme_active_ns_ctx *ctx)
{
	struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
	uint32_t i;
	int rc;

	if (ctrlr->num_ns == 0) {
		ctx->state = NVME_ACTIVE_NS_STATE_DONE;
		goto out;
	}

	/*
	 * If the controller doesn't support the active ns list (CNS 0x02),
	 * dummy up an active ns list, i.e. all namespaces report as active
	 */
	if (ctrlr->vs.raw < SPDK_NVME_VERSION(1, 1, 0) || (ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		for (i = 0; i < ctrlr->num_ns; i++) {
			ctx->new_ns_list[i] = i + 1;
		}

		ctx->state = NVME_ACTIVE_NS_STATE_DONE;
		goto out;
	}

	ctx->state = NVME_ACTIVE_NS_STATE_PROCESSING;
	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST, 0, ctx->next_nsid,
				     &ctx->new_ns_list[1024 * ctx->page], sizeof(struct spdk_nvme_ns_list),
				     nvme_ctrlr_identify_active_ns_async_done, ctx);
	if (rc != 0) {
		ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
		goto out;
	}

	return;

out:
	if (ctx->deleter) {
		ctx->deleter(ctx);
	}
}

static void
_nvme_active_ns_ctx_deleter(struct nvme_active_ns_ctx *ctx)
{
	struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;

	if (ctx->state == NVME_ACTIVE_NS_STATE_ERROR) {
		nvme_ctrlr_destruct_namespaces(ctrlr);
		nvme_active_ns_ctx_destroy(ctx);
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return;
	}

	assert(ctx->state == NVME_ACTIVE_NS_STATE_DONE);
	nvme_ctrlr_identify_active_ns_swap(ctrlr, &ctx->new_ns_list);
	nvme_active_ns_ctx_destroy(ctx);
	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS, ctrlr->opts.admin_timeout_ms);
}

static void
_nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_active_ns_ctx *ctx;

	ctx = nvme_active_ns_ctx_create(ctrlr, _nvme_active_ns_ctx_deleter);
	if (!ctx) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return;
	}

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS,
			     ctrlr->opts.admin_timeout_ms);
	nvme_ctrlr_identify_active_ns_async(ctx);
}

int
nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_active_ns_ctx *ctx;
	int rc;

	ctx = nvme_active_ns_ctx_create(ctrlr, NULL);
	if (!ctx) {
		return -ENOMEM;
	}

	nvme_ctrlr_identify_active_ns_async(ctx);
	while (ctx->state == NVME_ACTIVE_NS_STATE_PROCESSING) {
		rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		if (rc < 0) {
			ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
			break;
		}
	}

	if (ctx->state == NVME_ACTIVE_NS_STATE_ERROR) {
		nvme_active_ns_ctx_destroy(ctx);
		return -ENXIO;
	}

	assert(ctx->state == NVME_ACTIVE_NS_STATE_DONE);
	nvme_ctrlr_identify_active_ns_swap(ctrlr, &ctx->new_ns_list);
	nvme_active_ns_ctx_destroy(ctx);

	return 0;
}

static void
nvme_ctrlr_identify_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
	uint32_t nsid;
	int rc;

	if (spdk_nvme_cpl_is_error(cpl)) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return;
	} else {
		nvme_ns_set_identify_data(ns);
	}

	/* move on to the next active NS */
	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
	if (ns == NULL) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
				     ctrlr->opts.admin_timeout_ms);
		return;
	}
	ns->ctrlr = ctrlr;
	ns->id = nsid;

	rc = nvme_ctrlr_identify_ns_async(ns);
	if (rc) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
	}
}

static int
nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns)
{
	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
	struct spdk_nvme_ns_data *nsdata;

	nsdata = &ctrlr->nsdata[ns->id - 1];

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
			     ctrlr->opts.admin_timeout_ms);
	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id,
				       nsdata, sizeof(*nsdata),
				       nvme_ctrlr_identify_ns_async_done, ns);
}

static int
nvme_ctrlr_identify_namespaces(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;
	struct spdk_nvme_ns *ns;
	int rc;

	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
	if (ns == NULL) {
		/* No active NS, move on to the next state */
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
				     ctrlr->opts.admin_timeout_ms);
		return 0;
	}

	ns->ctrlr = ctrlr;
	ns->id = nsid;

	rc = nvme_ctrlr_identify_ns_async(ns);
	if (rc) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
	}

	return rc;
}

static void
nvme_ctrlr_identify_id_desc_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
	uint32_t nsid;
	int rc;

	if (spdk_nvme_cpl_is_error(cpl)) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
				     ctrlr->opts.admin_timeout_ms);
		return;
	}

	/* move on to the next active NS */
	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
	if (ns == NULL) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
				     ctrlr->opts.admin_timeout_ms);
		return;
	}

	rc = nvme_ctrlr_identify_id_desc_async(ns);
	if (rc) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
	}
}

static int
nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns)
{
	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;

	memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
			     ctrlr->opts.admin_timeout_ms);
	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST,
				       0, ns->id, ns->id_desc_list, sizeof(ns->id_desc_list),
				       nvme_ctrlr_identify_id_desc_async_done, ns);
}

static int
nvme_ctrlr_identify_id_desc_namespaces(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;
	struct spdk_nvme_ns *ns;
	int rc;

	if (ctrlr->vs.raw < SPDK_NVME_VERSION(1, 3, 0) ||
	    (ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Version < 1.3; not attempting to retrieve NS ID Descriptor List\n");
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
				     ctrlr->opts.admin_timeout_ms);
		return 0;
	}

	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
	if (ns == NULL) {
		/* No active NS, move on to the next state */
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
				     ctrlr->opts.admin_timeout_ms);
		return 0;
	}

	rc = nvme_ctrlr_identify_id_desc_async(ns);
	if (rc) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
	}

	return rc;
}

static void
nvme_ctrlr_update_nvmf_ioccsz(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_RDMA ||
	    ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_TCP ||
	    ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_FC) {
		if (ctrlr->cdata.nvmf_specific.ioccsz < 4) {
			SPDK_ERRLOG("Incorrect IOCCSZ %u, the minimum value should be 4\n",
				    ctrlr->cdata.nvmf_specific.ioccsz);
			ctrlr->cdata.nvmf_specific.ioccsz = 4;
			assert(0);
		}
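		/*
		 * IOCCSZ is in units of 16 bytes. Worked example: IOCCSZ = 4 means
		 * a 64-byte capsule, and 64 - sizeof(struct spdk_nvme_cmd) (also
		 * 64 bytes) leaves 0 bytes of in-capsule data.
		 */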
		ctrlr->ioccsz_bytes = ctrlr->cdata.nvmf_specific.ioccsz * 16 - sizeof(struct spdk_nvme_cmd);
		ctrlr->icdoff = ctrlr->cdata.nvmf_specific.icdoff;
	}
}

static void
nvme_ctrlr_set_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	uint32_t cq_allocated, sq_allocated, min_allocated, i;
	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_ERRLOG("Set Features - Number of Queues failed!\n");
		ctrlr->opts.num_io_queues = 0;
	} else {
		/*
		 * Data in cdw0 is 0-based.
		 * Lower 16-bits indicate number of submission queues allocated.
		 * Upper 16-bits indicate number of completion queues allocated.
		 */
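		/* Worked example: cdw0 = 0x000F000F means 16 SQs and 16 CQs were allocated. */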
		sq_allocated = (cpl->cdw0 & 0xFFFF) + 1;
		cq_allocated = (cpl->cdw0 >> 16) + 1;

		/*
		 * For 1:1 queue mapping, set number of allocated queues to be minimum of
		 * submission and completion queues.
		 */
		min_allocated = spdk_min(sq_allocated, cq_allocated);

		/* Set number of queues to be minimum of requested and actually allocated. */
		ctrlr->opts.num_io_queues = spdk_min(min_allocated, ctrlr->opts.num_io_queues);
	}

	ctrlr->free_io_qids = spdk_bit_array_create(ctrlr->opts.num_io_queues + 1);
	if (ctrlr->free_io_qids == NULL) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return;
	}

	/* Initialize list of free I/O queue IDs. QID 0 is the admin queue. */
	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
	for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
		spdk_bit_array_set(ctrlr->free_io_qids, i);
	}
	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONSTRUCT_NS,
			     ctrlr->opts.admin_timeout_ms);
}

static int
nvme_ctrlr_set_num_queues(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;

	if (ctrlr->opts.num_io_queues > SPDK_NVME_MAX_IO_QUEUES) {
		SPDK_NOTICELOG("Limiting requested num_io_queues %u to max %d\n",
			       ctrlr->opts.num_io_queues, SPDK_NVME_MAX_IO_QUEUES);
		ctrlr->opts.num_io_queues = SPDK_NVME_MAX_IO_QUEUES;
	} else if (ctrlr->opts.num_io_queues < 1) {
		SPDK_NOTICELOG("Requested num_io_queues 0, increasing to 1\n");
		ctrlr->opts.num_io_queues = 1;
	}

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
			     ctrlr->opts.admin_timeout_ms);

	rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->opts.num_io_queues,
					   nvme_ctrlr_set_num_queues_done, ctrlr);
	if (rc != 0) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return rc;
	}

	return 0;
}

static void
nvme_ctrlr_set_keep_alive_timeout_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	uint32_t keep_alive_interval_ms;
	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_ERRLOG("Keep alive timeout Get Feature failed: SC %x SCT %x\n",
			    cpl->status.sc, cpl->status.sct);
		ctrlr->opts.keep_alive_timeout_ms = 0;
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return;
	}

	if (ctrlr->opts.keep_alive_timeout_ms != cpl->cdw0) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Controller adjusted keep alive timeout to %u ms\n",
			      cpl->cdw0);
	}

	ctrlr->opts.keep_alive_timeout_ms = cpl->cdw0;

1806 	keep_alive_interval_ms = ctrlr->opts.keep_alive_timeout_ms / 2;
1807 	if (keep_alive_interval_ms == 0) {
1808 		keep_alive_interval_ms = 1;
1809 	}
1810 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Sending keep alive every %u ms\n", keep_alive_interval_ms);
1811 
1812 	ctrlr->keep_alive_interval_ticks = (keep_alive_interval_ms * spdk_get_ticks_hz()) / UINT64_C(1000);
1813 
1814 	/* Schedule the first Keep Alive to be sent as soon as possible. */
1815 	ctrlr->next_keep_alive_tick = spdk_get_ticks();
1816 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
1817 			     ctrlr->opts.admin_timeout_ms);
1818 }
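
/*
 * Worked example (added): if the controller reports a Keep Alive Timeout of
 * 10000 ms in cdw0, the host will send a Keep Alive roughly every 5000 ms,
 * i.e. half the timeout, leaving headroom for scheduling delays.
 */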
1819 
1820 static int
1821 nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
1822 {
1823 	int rc;
1824 
1825 	if (ctrlr->opts.keep_alive_timeout_ms == 0) {
1826 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
1827 				     ctrlr->opts.admin_timeout_ms);
1828 		return 0;
1829 	}
1830 
1831 	if (ctrlr->cdata.kas == 0) {
1832 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Controller KAS is 0 - not enabling Keep Alive\n");
1833 		ctrlr->opts.keep_alive_timeout_ms = 0;
1834 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
1835 				     ctrlr->opts.admin_timeout_ms);
1836 		return 0;
1837 	}
1838 
1839 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
1840 			     ctrlr->opts.admin_timeout_ms);
1841 
1842 	/* Retrieve actual keep alive timeout, since the controller may have adjusted it. */
1843 	rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0,
1844 					     nvme_ctrlr_set_keep_alive_timeout_done, ctrlr);
1845 	if (rc != 0) {
1846 		SPDK_ERRLOG("Keep alive timeout Get Feature failed: %d\n", rc);
1847 		ctrlr->opts.keep_alive_timeout_ms = 0;
1848 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1849 		return rc;
1850 	}
1851 
1852 	return 0;
1853 }
1854 
1855 static void
1856 nvme_ctrlr_set_host_id_done(void *arg, const struct spdk_nvme_cpl *cpl)
1857 {
1858 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
1859 
1860 	if (spdk_nvme_cpl_is_error(cpl)) {
1861 		/*
1862 		 * Treat Set Features - Host ID failure as non-fatal, since the Host ID feature
1863 		 * is optional.
1864 		 */
1865 		SPDK_WARNLOG("Set Features - Host ID failed: SC 0x%x SCT 0x%x\n",
1866 			     cpl->status.sc, cpl->status.sct);
1867 	} else {
1868 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Set Features - Host ID was successful\n");
1869 	}
1870 
1871 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
1872 }
1873 
1874 static int
1875 nvme_ctrlr_set_host_id(struct spdk_nvme_ctrlr *ctrlr)
1876 {
1877 	uint8_t *host_id;
1878 	uint32_t host_id_size;
1879 	int rc;
1880 
1881 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
1882 		/*
1883 		 * NVMe-oF sends the host ID during Connect and doesn't allow
1884 		 * Set Features - Host Identifier after Connect, so we don't need to do anything here.
1885 		 */
1886 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "NVMe-oF transport - not sending Set Features - Host ID\n");
1887 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
1888 		return 0;
1889 	}
1890 
1891 	if (ctrlr->cdata.ctratt.host_id_exhid_supported) {
1892 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Using 128-bit extended host identifier\n");
1893 		host_id = ctrlr->opts.extended_host_id;
1894 		host_id_size = sizeof(ctrlr->opts.extended_host_id);
1895 	} else {
1896 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Using 64-bit host identifier\n");
1897 		host_id = ctrlr->opts.host_id;
1898 		host_id_size = sizeof(ctrlr->opts.host_id);
1899 	}
1900 
1901 	/* If the user specified an all-zeroes host identifier, don't send the command. */
1902 	if (spdk_mem_all_zero(host_id, host_id_size)) {
1903 		SPDK_DEBUGLOG(SPDK_LOG_NVME,
1904 			      "User did not specify host ID - not sending Set Features - Host ID\n");
1905 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
1906 		return 0;
1907 	}
1908 
1909 	SPDK_LOGDUMP(SPDK_LOG_NVME, "host_id", host_id, host_id_size);
1910 
1911 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
1912 			     ctrlr->opts.admin_timeout_ms);
1913 
1914 	rc = nvme_ctrlr_cmd_set_host_id(ctrlr, host_id, host_id_size, nvme_ctrlr_set_host_id_done, ctrlr);
1915 	if (rc != 0) {
1916 		SPDK_ERRLOG("Set Features - Host ID failed: %d\n", rc);
1917 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1918 		return rc;
1919 	}
1920 
1921 	return 0;
1922 }
1923 
1924 static void
1925 nvme_ctrlr_destruct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1926 {
1927 	if (ctrlr->ns) {
1928 		uint32_t i, num_ns = ctrlr->num_ns;
1929 
1930 		for (i = 0; i < num_ns; i++) {
1931 			nvme_ns_destruct(&ctrlr->ns[i]);
1932 		}
1933 
1934 		spdk_free(ctrlr->ns);
1935 		ctrlr->ns = NULL;
1936 		ctrlr->num_ns = 0;
1937 	}
1938 
1939 	if (ctrlr->nsdata) {
1940 		spdk_free(ctrlr->nsdata);
1941 		ctrlr->nsdata = NULL;
1942 	}
1943 
1944 	spdk_free(ctrlr->active_ns_list);
1945 	ctrlr->active_ns_list = NULL;
1946 }
1947 
1948 static void
1949 nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1950 {
1951 	uint32_t i, nn = ctrlr->cdata.nn;
1952 	struct spdk_nvme_ns_data *nsdata;
1953 	bool ns_is_active;
1954 
1955 	for (i = 0; i < nn; i++) {
1956 		struct spdk_nvme_ns	*ns = &ctrlr->ns[i];
1957 		uint32_t		nsid = i + 1;
1958 
1959 		nsdata = &ctrlr->nsdata[nsid - 1];
1960 		ns_is_active = spdk_nvme_ctrlr_is_active_ns(ctrlr, nsid);
1961 
1962 		if (nsdata->ncap && ns_is_active) {
1963 			if (nvme_ns_update(ns) != 0) {
1964 				SPDK_ERRLOG("Failed to update active NS %u\n", nsid);
1965 				continue;
1966 			}
1967 		}
1968 
1969 		if ((nsdata->ncap == 0) && ns_is_active) {
1970 			if (nvme_ns_construct(ns, nsid, ctrlr) != 0) {
1971 				continue;
1972 			}
1973 		}
1974 
1975 		if (nsdata->ncap && !ns_is_active) {
1976 			nvme_ns_destruct(ns);
1977 		}
1978 	}
1979 }
1980 
1981 static int
1982 nvme_ctrlr_construct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
1983 {
1984 	int rc = 0;
1985 	uint32_t nn = ctrlr->cdata.nn;
1986 
1987 	/* ctrlr->num_ns may be 0 (startup) or a different number of namespaces (reset),
1988 	 * so check if we need to reallocate.
1989 	 */
1990 	if (nn != ctrlr->num_ns) {
1991 		nvme_ctrlr_destruct_namespaces(ctrlr);
1992 
1993 		if (nn == 0) {
1994 			SPDK_WARNLOG("controller has 0 namespaces\n");
1995 			return 0;
1996 		}
1997 
1998 		ctrlr->ns = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns), 64, NULL,
1999 					 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
2000 		if (ctrlr->ns == NULL) {
2001 			rc = -ENOMEM;
2002 			goto fail;
2003 		}
2004 
2005 		ctrlr->nsdata = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns_data), 64,
2006 					     NULL, SPDK_ENV_SOCKET_ID_ANY,
2007 					     SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
2008 		if (ctrlr->nsdata == NULL) {
2009 			rc = -ENOMEM;
2010 			goto fail;
2011 		}
2012 
2013 		ctrlr->num_ns = nn;
2014 	}
2015 
2016 	return 0;
2017 
2018 fail:
2019 	nvme_ctrlr_destruct_namespaces(ctrlr);
2020 	return rc;
2021 }
2022 
2023 static void
2024 nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl)
2025 {
2026 	struct nvme_async_event_request	*aer = arg;
2027 	struct spdk_nvme_ctrlr		*ctrlr = aer->ctrlr;
2028 	struct spdk_nvme_ctrlr_process	*active_proc;
2029 	union spdk_nvme_async_event_completion	event;
2030 	int					rc;
2031 
2032 	if (cpl->status.sct == SPDK_NVME_SCT_GENERIC &&
2033 	    cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) {
2034 		/*
2035 		 *  This completion is simulated when the controller is being shut down, to
2036 		 *  effectively abort outstanding asynchronous event requests
2037 		 *  and make sure all memory is freed.  Do not repost the
2038 		 *  request in this case.
2039 		 */
2040 		return;
2041 	}
2042 
2043 	if (cpl->status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
2044 	    cpl->status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED) {
2045 		/*
2046 		 *  SPDK will only send as many AERs as the device says it supports,
2047 		 *  so this status code indicates an out-of-spec device.  Do not repost
2048 		 *  the request in this case.
2049 		 */
2050 		SPDK_ERRLOG("Controller appears out-of-spec for asynchronous event request\n"
2051 			    "handling.  Do not repost this AER.\n");
2052 		return;
2053 	}
2054 
2055 	event.raw = cpl->cdw0;
2056 	if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
2057 	    (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED)) {
2058 		rc = nvme_ctrlr_identify_active_ns(ctrlr);
2059 		if (rc) {
2060 			return;
2061 		}
2062 		nvme_ctrlr_update_namespaces(ctrlr);
2063 	}
2064 
2065 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
2066 	if (active_proc && active_proc->aer_cb_fn) {
2067 		active_proc->aer_cb_fn(active_proc->aer_cb_arg, cpl);
2068 	}
2069 
2070 	/* If the ctrlr was removed or in the destruct state, we should not send aer again */
2071 	if (ctrlr->is_removed || ctrlr->is_destructed) {
2072 		return;
2073 	}
2074 
2075 	/*
2076 	 * Repost another asynchronous event request to replace the one
2077 	 *  that just completed.
2078 	 */
2079 	if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) {
2080 		/*
2081 		 * We can't do anything to recover from a failure here,
2082 		 * so just log an error and leave the AER unsubmitted.
2083 		 */
2084 		SPDK_ERRLOG("resubmitting AER failed!\n");
2085 	}
2086 }
2087 
2088 static int
2089 nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
2090 				    struct nvme_async_event_request *aer)
2091 {
2092 	struct nvme_request *req;
2093 
2094 	aer->ctrlr = ctrlr;
2095 	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_async_event_cb, aer);
2096 	aer->req = req;
2097 	if (req == NULL) {
2098 		return -1;
2099 	}
2100 
2101 	req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2102 	return nvme_ctrlr_submit_admin_request(ctrlr, req);
2103 }
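
/*
 * Note (added): an Asynchronous Event Request carries no payload and is
 * expected to stay outstanding indefinitely; when the controller posts an
 * event, nvme_ctrlr_async_event_cb() above consumes it and reposts a new
 * request.
 */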
2104 
2105 static void
2106 nvme_ctrlr_configure_aer_done(void *arg, const struct spdk_nvme_cpl *cpl)
2107 {
2108 	struct nvme_async_event_request		*aer;
2109 	int					rc;
2110 	uint32_t				i;
2111 	struct spdk_nvme_ctrlr *ctrlr =	(struct spdk_nvme_ctrlr *)arg;
2112 
2113 	if (spdk_nvme_cpl_is_error(cpl)) {
2114 		SPDK_NOTICELOG("nvme_ctrlr_configure_aer failed!\n");
2115 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
2116 				     ctrlr->opts.admin_timeout_ms);
2117 		return;
2118 	}
2119 
2120 	/* aerl is a zero-based value, so we need to add 1 here. */
2121 	ctrlr->num_aers = spdk_min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));
2122 
2123 	for (i = 0; i < ctrlr->num_aers; i++) {
2124 		aer = &ctrlr->aer[i];
2125 		rc = nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
2126 		if (rc) {
2127 			SPDK_ERRLOG("nvme_ctrlr_construct_and_submit_aer failed!\n");
2128 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
2129 			return;
2130 		}
2131 	}
2132 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
2133 			     ctrlr->opts.admin_timeout_ms);
2134 }
2135 
2136 static int
2137 nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
2138 {
2139 	union spdk_nvme_feat_async_event_configuration	config;
2140 	int						rc;
2141 
2142 	config.raw = 0;
2143 	config.bits.crit_warn.bits.available_spare = 1;
2144 	config.bits.crit_warn.bits.temperature = 1;
2145 	config.bits.crit_warn.bits.device_reliability = 1;
2146 	config.bits.crit_warn.bits.read_only = 1;
2147 	config.bits.crit_warn.bits.volatile_memory_backup = 1;
2148 
2149 	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 2, 0)) {
2150 		if (ctrlr->cdata.oaes.ns_attribute_notices) {
2151 			config.bits.ns_attr_notice = 1;
2152 		}
2153 		if (ctrlr->cdata.oaes.fw_activation_notices) {
2154 			config.bits.fw_activation_notice = 1;
2155 		}
2156 	}
2157 	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 3, 0) && ctrlr->cdata.lpa.telemetry) {
2158 		config.bits.telemetry_log_notice = 1;
2159 	}
2160 
2161 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
2162 			     ctrlr->opts.admin_timeout_ms);
2163 
2164 	rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, config,
2165 			nvme_ctrlr_configure_aer_done,
2166 			ctrlr);
2167 	if (rc != 0) {
2168 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
2169 		return rc;
2170 	}
2171 
2172 	return 0;
2173 }
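
/*
 * Example (added): a controller reporting version 1.3 with
 * cdata.oaes.ns_attribute_notices = 1 and cdata.lpa.telemetry = 1 would be
 * configured with all five critical-warning bits plus ns_attr_notice and
 * telemetry_log_notice set. A failed Set Features completion is treated as
 * non-fatal above, and initialization simply moves on.
 */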
2174 
2175 struct spdk_nvme_ctrlr_process *
2176 spdk_nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr, pid_t pid)
2177 {
2178 	struct spdk_nvme_ctrlr_process	*active_proc;
2179 
2180 	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
2181 		if (active_proc->pid == pid) {
2182 			return active_proc;
2183 		}
2184 	}
2185 
2186 	return NULL;
2187 }
2188 
2189 struct spdk_nvme_ctrlr_process *
2190 spdk_nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr)
2191 {
2192 	return spdk_nvme_ctrlr_get_process(ctrlr, getpid());
2193 }
2194 
2195 /**
2196  * This function is called when a process starts using the controller.
2197  *  1. For the primary process, it is called while constructing the controller.
2198  *  2. For a secondary process, it is called when probing the controller.
2199  * Note: it first checks whether the same process has already been added.
2200  */
2201 int
2202 nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
2203 {
2204 	struct spdk_nvme_ctrlr_process	*ctrlr_proc;
2205 	pid_t				pid = getpid();
2206 
2207 	/* Check whether the process is already added or not */
2208 	if (spdk_nvme_ctrlr_get_process(ctrlr, pid)) {
2209 		return 0;
2210 	}
2211 
2212 	/* Initialize the per process properties for this ctrlr */
2213 	ctrlr_proc = spdk_zmalloc(sizeof(struct spdk_nvme_ctrlr_process),
2214 				  64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
2215 	if (ctrlr_proc == NULL) {
2216 		SPDK_ERRLOG("failed to allocate memory to track the process props\n");
2217 
2218 		return -1;
2219 	}
2220 
2221 	ctrlr_proc->is_primary = spdk_process_is_primary();
2222 	ctrlr_proc->pid = pid;
2223 	STAILQ_INIT(&ctrlr_proc->active_reqs);
2224 	ctrlr_proc->devhandle = devhandle;
2225 	ctrlr_proc->ref = 0;
2226 	TAILQ_INIT(&ctrlr_proc->allocated_io_qpairs);
2227 
2228 	TAILQ_INSERT_TAIL(&ctrlr->active_procs, ctrlr_proc, tailq);
2229 
2230 	return 0;
2231 }
2232 
2233 /**
2234  * This function will be called when the process detaches the controller.
2235  * Note: the ctrlr_lock must be held when calling this function.
2236  */
2237 static void
2238 nvme_ctrlr_remove_process(struct spdk_nvme_ctrlr *ctrlr,
2239 			  struct spdk_nvme_ctrlr_process *proc)
2240 {
2241 	struct spdk_nvme_qpair	*qpair, *tmp_qpair;
2242 
2243 	assert(STAILQ_EMPTY(&proc->active_reqs));
2244 
2245 	TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
2246 		spdk_nvme_ctrlr_free_io_qpair(qpair);
2247 	}
2248 
2249 	TAILQ_REMOVE(&ctrlr->active_procs, proc, tailq);
2250 
2251 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
2252 		spdk_pci_device_detach(proc->devhandle);
2253 	}
2254 
2255 	spdk_free(proc);
2256 }
2257 
2258 /**
2259  * This function is called when a process has exited unexpectedly,
2260  *  in order to free any incomplete nvme requests, allocated I/O qpairs,
2261  *  and allocated memory.
2262  * Note: the ctrlr_lock must be held when calling this function.
2263  */
2264 static void
2265 nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc)
2266 {
2267 	struct nvme_request	*req, *tmp_req;
2268 	struct spdk_nvme_qpair	*qpair, *tmp_qpair;
2269 
2270 	STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) {
2271 		STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq);
2272 
2273 		assert(req->pid == proc->pid);
2274 
2275 		nvme_free_request(req);
2276 	}
2277 
2278 	TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
2279 		TAILQ_REMOVE(&proc->allocated_io_qpairs, qpair, per_process_tailq);
2280 
2281 		/*
2282 		 * The process may have been killed while some qpairs were in their
2283 		 *  completion context.  Clear that flag here to allow these IO
2284 		 *  qpairs to be deleted.
2285 		 */
2286 		qpair->in_completion_context = 0;
2287 
2288 		qpair->no_deletion_notification_needed = 1;
2289 
2290 		spdk_nvme_ctrlr_free_io_qpair(qpair);
2291 	}
2292 
2293 	spdk_free(proc);
2294 }
2295 
2296 /**
2297  * This function is called when destructing the controller.
2298  *  1. There are no more admin requests outstanding on this controller.
2299  *  2. Clean up any leftover resource allocations whose associated process is gone.
2300  */
2301 void
2302 nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
2303 {
2304 	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;
2305 
2306 	/* Free all the processes' properties and make sure there are no pending admin I/Os. */
2307 	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
2308 		TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
2309 
2310 		assert(STAILQ_EMPTY(&active_proc->active_reqs));
2311 
2312 		spdk_free(active_proc);
2313 	}
2314 }
2315 
2316 /**
2317  * This function is called when any other process attaches or
2318  *  detaches the controller, in order to clean up those unexpectedly
2319  *  terminated processes.
2320  * Note: the ctrlr_lock must be held when calling this function.
2321  */
2322 static int
2323 nvme_ctrlr_remove_inactive_proc(struct spdk_nvme_ctrlr *ctrlr)
2324 {
2325 	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;
2326 	int				active_proc_count = 0;
2327 
2328 	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
2329 		if ((kill(active_proc->pid, 0) == -1) && (errno == ESRCH)) {
2330 			SPDK_ERRLOG("process %d terminated unexpected\n", active_proc->pid);
2331 
2332 			TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
2333 
2334 			nvme_ctrlr_cleanup_process(active_proc);
2335 		} else {
2336 			active_proc_count++;
2337 		}
2338 	}
2339 
2340 	return active_proc_count;
2341 }
2342 
2343 void
2344 nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
2345 {
2346 	struct spdk_nvme_ctrlr_process	*active_proc;
2347 
2348 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2349 
2350 	nvme_ctrlr_remove_inactive_proc(ctrlr);
2351 
2352 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
2353 	if (active_proc) {
2354 		active_proc->ref++;
2355 	}
2356 
2357 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2358 }
2359 
2360 void
2361 nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
2362 {
2363 	struct spdk_nvme_ctrlr_process	*active_proc;
2364 	int				proc_count;
2365 
2366 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2367 
2368 	proc_count = nvme_ctrlr_remove_inactive_proc(ctrlr);
2369 
2370 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
2371 	if (active_proc) {
2372 		active_proc->ref--;
2373 		assert(active_proc->ref >= 0);
2374 
2375 		/*
2376 		 * The last active process will be removed at the end of
2377 		 * the destruction of the controller.
2378 		 */
2379 		if (active_proc->ref == 0 && proc_count != 1) {
2380 			nvme_ctrlr_remove_process(ctrlr, active_proc);
2381 		}
2382 	}
2383 
2384 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2385 }
2386 
2387 int
2388 nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
2389 {
2390 	struct spdk_nvme_ctrlr_process	*active_proc;
2391 	int				ref = 0;
2392 
2393 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2394 
2395 	nvme_ctrlr_remove_inactive_proc(ctrlr);
2396 
2397 	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
2398 		ref += active_proc->ref;
2399 	}
2400 
2401 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2402 
2403 	return ref;
2404 }
2405 
2406 /**
2407  *  Get the PCI device handle which is only visible to its associated process.
2408  */
2409 struct spdk_pci_device *
2410 nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr)
2411 {
2412 	struct spdk_nvme_ctrlr_process	*active_proc;
2413 	struct spdk_pci_device		*devhandle = NULL;
2414 
2415 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2416 
2417 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
2418 	if (active_proc) {
2419 		devhandle = active_proc->devhandle;
2420 	}
2421 
2422 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2423 
2424 	return devhandle;
2425 }
2426 
2427 /**
2428  * This function will be called repeatedly during initialization until the controller is ready.
2429  */
2430 int
2431 nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
2432 {
2433 	union spdk_nvme_cc_register cc;
2434 	union spdk_nvme_csts_register csts;
2435 	uint32_t ready_timeout_in_ms;
2436 	int rc = 0;
2437 
2438 	/*
2439 	 * We may need to avoid accessing any register on the target controller
2440 	 * for a while; in that case, return early without touching the state machine.
2441 	 * (sleep_timeout_tsc > 0 is checked explicitly for the benefit of unit tests.)
2442 	 */
2443 	if ((ctrlr->sleep_timeout_tsc > 0) &&
2444 	    (spdk_get_ticks() <= ctrlr->sleep_timeout_tsc)) {
2445 		return 0;
2446 	}
2447 	ctrlr->sleep_timeout_tsc = 0;
2448 
2449 	if (nvme_ctrlr_get_cc(ctrlr, &cc) ||
2450 	    nvme_ctrlr_get_csts(ctrlr, &csts)) {
2451 		if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
2452 			/* While a device is resetting, it may be unable to service MMIO reads
2453 			 * temporarily. Allow for this case.
2454 			 */
2455 			SPDK_ERRLOG("Get registers failed while waiting for CSTS.RDY == 0\n");
2456 			goto init_timeout;
2457 		}
2458 		SPDK_ERRLOG("Failed to read CC and CSTS in state %d\n", ctrlr->state);
2459 		return -EIO;
2460 	}
2461 
2462 	ready_timeout_in_ms = 500 * ctrlr->cap.bits.to;
2463 
2464 	/*
2465 	 * Check if the current initialization step is done or has timed out.
2466 	 */
2467 	switch (ctrlr->state) {
2468 	case NVME_CTRLR_STATE_INIT_DELAY:
2469 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, ready_timeout_in_ms);
2470 		if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_INIT) {
2471 			/*
2472 			 * Controller may need some delay before it's enabled.
2473 			 *
2474 			 * This is a workaround for an issue where the PCIe-attached NVMe controller
2475 			 * is not ready after VFIO reset. We delay the initialization rather than the
2476 			 * enabling itself, because this is required only for the very first enabling
2477 			 * - directly after a VFIO reset.
2478 			 */
2479 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "Adding 2 second delay before initializing the controller\n");
2480 			ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2000 * spdk_get_ticks_hz() / 1000);
2481 		}
2482 		break;
2483 
2484 	case NVME_CTRLR_STATE_INIT:
2485 		/* Begin the hardware initialization by making sure the controller is disabled. */
2486 		if (cc.bits.en) {
2487 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1\n");
2488 			/*
2489 			 * Controller is currently enabled. We need to disable it to cause a reset.
2490 			 *
2491 			 * If CC.EN = 1 && CSTS.RDY = 0, the controller is in the process of becoming ready.
2492 			 *  Wait for the ready bit to be 1 before disabling the controller.
2493 			 */
2494 			if (csts.bits.rdy == 0) {
2495 				SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 0 - waiting for reset to complete\n");
2496 				nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1, ready_timeout_in_ms);
2497 				return 0;
2498 			}
2499 
2500 			/* CC.EN = 1 && CSTS.RDY == 1, so we can immediately disable the controller. */
2501 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 0\n");
2502 			cc.bits.en = 0;
2503 			if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
2504 				SPDK_ERRLOG("set_cc() failed\n");
2505 				return -EIO;
2506 			}
2507 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
2508 
2509 			/*
2510 			 * Wait 2.5 seconds before accessing PCI registers.
2511 			 * Not using sleep() to avoid blocking other controllers' initialization.
2512 			 */
2513 			if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) {
2514 				SPDK_DEBUGLOG(SPDK_LOG_NVME, "Applying quirk: delay 2.5 seconds before reading registers\n");
2515 				ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2500 * spdk_get_ticks_hz() / 1000);
2516 			}
2517 			return 0;
2518 		} else {
2519 			if (csts.bits.rdy == 1) {
2520 				SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 0 && CSTS.RDY = 1 - waiting for shutdown to complete\n");
2521 			}
2522 
2523 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
2524 			return 0;
2525 		}
2526 		break;
2527 
2528 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
2529 		if (csts.bits.rdy == 1) {
2530 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 1 - disabling controller\n");
2531 			/* CC.EN = 1 && CSTS.RDY = 1, so we can set CC.EN = 0 now. */
2532 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 0\n");
2533 			cc.bits.en = 0;
2534 			if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
2535 				SPDK_ERRLOG("set_cc() failed\n");
2536 				return -EIO;
2537 			}
2538 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
2539 			return 0;
2540 		}
2541 		break;
2542 
2543 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
2544 		if (csts.bits.rdy == 0) {
2545 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 0 && CSTS.RDY = 0\n");
2546 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE, ready_timeout_in_ms);
2547 			/*
2548 			 * Delay 100us before setting CC.EN = 1.  Some NVMe SSDs miss CC.EN getting
2549 			 *  set to 1 if it is too soon after CSTS.RDY is reported as 0.
2550 			 */
2551 			spdk_delay_us(100);
2552 			return 0;
2553 		}
2554 		break;
2555 
2556 	case NVME_CTRLR_STATE_ENABLE:
2557 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 1\n");
2558 		rc = nvme_ctrlr_enable(ctrlr);
2559 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1, ready_timeout_in_ms);
2560 		return rc;
2561 
2562 	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
2563 		if (csts.bits.rdy == 1) {
2564 			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 1 - controller is ready\n");
2565 			/*
2566 			 * The controller has been enabled.
2567 			 *  Perform the rest of initialization serially.
2568 			 */
2569 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_RESET_ADMIN_QUEUE,
2570 					     ctrlr->opts.admin_timeout_ms);
2571 			return 0;
2572 		}
2573 		break;
2574 
2575 	case NVME_CTRLR_STATE_RESET_ADMIN_QUEUE:
2576 		nvme_transport_qpair_reset(ctrlr->adminq);
2577 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY,
2578 				     ctrlr->opts.admin_timeout_ms);
2579 		break;
2580 
2581 	case NVME_CTRLR_STATE_IDENTIFY:
2582 		rc = nvme_ctrlr_identify(ctrlr);
2583 		break;
2584 
2585 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
2586 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2587 		break;
2588 
2589 	case NVME_CTRLR_STATE_SET_NUM_QUEUES:
2590 		nvme_ctrlr_update_nvmf_ioccsz(ctrlr);
2591 		rc = nvme_ctrlr_set_num_queues(ctrlr);
2592 		break;
2593 
2594 	case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
2595 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2596 		break;
2597 
2598 	case NVME_CTRLR_STATE_CONSTRUCT_NS:
2599 		rc = nvme_ctrlr_construct_namespaces(ctrlr);
2600 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
2601 				     ctrlr->opts.admin_timeout_ms);
2602 		break;
2603 
2604 	case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
2605 		_nvme_ctrlr_identify_active_ns(ctrlr);
2606 		break;
2607 
2608 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS:
2609 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2610 		break;
2611 
2612 	case NVME_CTRLR_STATE_IDENTIFY_NS:
2613 		rc = nvme_ctrlr_identify_namespaces(ctrlr);
2614 		break;
2615 
2616 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
2617 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2618 		break;
2619 
2620 	case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
2621 		rc = nvme_ctrlr_identify_id_desc_namespaces(ctrlr);
2622 		break;
2623 
2624 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
2625 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2626 		break;
2627 
2628 	case NVME_CTRLR_STATE_CONFIGURE_AER:
2629 		rc = nvme_ctrlr_configure_aer(ctrlr);
2630 		break;
2631 
2632 	case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
2633 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2634 		break;
2635 
2636 	case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
2637 		rc = nvme_ctrlr_set_supported_log_pages(ctrlr);
2638 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
2639 				     ctrlr->opts.admin_timeout_ms);
2640 		break;
2641 
2642 	case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
2643 		nvme_ctrlr_set_supported_features(ctrlr);
2644 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
2645 				     ctrlr->opts.admin_timeout_ms);
2646 		break;
2647 
2648 	case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
2649 		rc = nvme_ctrlr_set_doorbell_buffer_config(ctrlr);
2650 		break;
2651 
2652 	case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
2653 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2654 		break;
2655 
2656 	case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
2657 		rc = nvme_ctrlr_set_keep_alive_timeout(ctrlr);
2658 		break;
2659 
2660 	case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
2661 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2662 		break;
2663 
2664 	case NVME_CTRLR_STATE_SET_HOST_ID:
2665 		rc = nvme_ctrlr_set_host_id(ctrlr);
2666 		break;
2667 
2668 	case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
2669 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2670 		break;
2671 
2672 	case NVME_CTRLR_STATE_READY:
2673 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Ctrlr already in ready state\n");
2674 		return 0;
2675 
2676 	case NVME_CTRLR_STATE_ERROR:
2677 		SPDK_ERRLOG("Ctrlr %s is in error state\n", ctrlr->trid.traddr);
2678 		return -1;
2679 
2680 	default:
2681 		assert(0);
2682 		return -1;
2683 	}
2684 
2685 init_timeout:
2686 	if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE &&
2687 	    spdk_get_ticks() > ctrlr->state_timeout_tsc) {
2688 		SPDK_ERRLOG("Initialization timed out in state %d\n", ctrlr->state);
2689 		return -1;
2690 	}
2691 
2692 	return rc;
2693 }
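
/*
 * Usage sketch (added, illustrative rather than verbatim): transport attach
 * code typically drives this state machine by polling until it settles, e.g.
 *
 *     while (ctrlr->state != NVME_CTRLR_STATE_READY) {
 *         if (nvme_ctrlr_process_init(ctrlr) != 0) {
 *             break; // initialization failed; destruct the controller
 *         }
 *     }
 */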
2694 
2695 int
2696 nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx)
2697 {
2698 	pthread_mutexattr_t attr;
2699 	int rc = 0;
2700 
2701 	if (pthread_mutexattr_init(&attr)) {
2702 		return -1;
2703 	}
2704 	if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) ||
2705 #ifndef __FreeBSD__
2706 	    pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST) ||
2707 	    pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
2708 #endif
2709 	    pthread_mutex_init(mtx, &attr)) {
2710 		rc = -1;
2711 	}
2712 	pthread_mutexattr_destroy(&attr);
2713 	return rc;
2714 }
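
/*
 * Note (added): the recursive, robust, process-shared attributes (the latter
 * two are skipped on FreeBSD) allow a surviving process to recover the lock
 * if a peer process dies while holding it, which matters for the shared
 * ctrlr_lock used by the multi-process support above.
 */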
2715 
2716 int
2717 nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
2718 {
2719 	int rc;
2720 
2721 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
2722 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT_DELAY, NVME_TIMEOUT_INFINITE);
2723 	} else {
2724 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
2725 	}
2726 
2727 	if (ctrlr->opts.admin_queue_size > SPDK_NVME_ADMIN_QUEUE_MAX_ENTRIES) {
2728 		SPDK_ERRLOG("admin_queue_size %u exceeds max defined by NVMe spec, use max value\n",
2729 			    ctrlr->opts.admin_queue_size);
2730 		ctrlr->opts.admin_queue_size = SPDK_NVME_ADMIN_QUEUE_MAX_ENTRIES;
2731 	}
2732 
2733 	if (ctrlr->opts.admin_queue_size < SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES) {
2734 		SPDK_ERRLOG("admin_queue_size %u is less than minimum defined by NVMe spec, use min value\n",
2735 			    ctrlr->opts.admin_queue_size);
2736 		ctrlr->opts.admin_queue_size = SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES;
2737 	}
2738 
2739 	ctrlr->flags = 0;
2740 	ctrlr->free_io_qids = NULL;
2741 	ctrlr->is_resetting = false;
2742 	ctrlr->is_failed = false;
2743 	ctrlr->is_destructed = false;
2744 
2745 	TAILQ_INIT(&ctrlr->active_io_qpairs);
2746 	STAILQ_INIT(&ctrlr->queued_aborts);
2747 	ctrlr->outstanding_aborts = 0;
2748 
2749 	rc = nvme_robust_mutex_init_recursive_shared(&ctrlr->ctrlr_lock);
2750 	if (rc != 0) {
2751 		return rc;
2752 	}
2753 
2754 	TAILQ_INIT(&ctrlr->active_procs);
2755 
2756 	return rc;
2757 }
2758 
2759 /* This function should be called once at ctrlr initialization to set up constant properties. */
2760 void
2761 nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_register *cap,
2762 		    const union spdk_nvme_vs_register *vs)
2763 {
2764 	ctrlr->cap = *cap;
2765 	ctrlr->vs = *vs;
2766 
2767 	if (ctrlr->cap.bits.ams & SPDK_NVME_CAP_AMS_WRR) {
2768 		ctrlr->flags |= SPDK_NVME_CTRLR_WRR_SUPPORTED;
2769 	}
2770 
2771 	ctrlr->min_page_size = 1u << (12 + ctrlr->cap.bits.mpsmin);
2772 
2773 	/* For now, always select page_size == min_page_size. */
2774 	ctrlr->page_size = ctrlr->min_page_size;
2775 
2776 	ctrlr->opts.io_queue_size = spdk_max(ctrlr->opts.io_queue_size, SPDK_NVME_IO_QUEUE_MIN_ENTRIES);
2777 	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, MAX_IO_QUEUE_ENTRIES);
2778 	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
2779 
2780 	ctrlr->opts.io_queue_requests = spdk_max(ctrlr->opts.io_queue_requests, ctrlr->opts.io_queue_size);
2781 }
2782 
2783 void
2784 nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr)
2785 {
2786 	pthread_mutex_destroy(&ctrlr->ctrlr_lock);
2787 }
2788 
2789 void
2790 nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
2791 {
2792 	struct spdk_nvme_qpair *qpair, *tmp;
2793 
2794 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Prepare to destruct SSD: %s\n", ctrlr->trid.traddr);
2795 
2796 	ctrlr->is_destructed = true;
2797 
2798 	spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2799 	nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
2800 
2801 	TAILQ_FOREACH_SAFE(qpair, &ctrlr->active_io_qpairs, tailq, tmp) {
2802 		spdk_nvme_ctrlr_free_io_qpair(qpair);
2803 	}
2804 
2805 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
2806 
2807 	if (ctrlr->opts.no_shn_notification) {
2808 		SPDK_INFOLOG(SPDK_LOG_NVME, "Disable SSD: %s without shutdown notification\n",
2809 			     ctrlr->trid.traddr);
2810 		nvme_ctrlr_disable(ctrlr);
2811 	} else {
2812 		nvme_ctrlr_shutdown(ctrlr);
2813 	}
2814 
2815 	nvme_ctrlr_destruct_namespaces(ctrlr);
2816 
2817 	spdk_bit_array_free(&ctrlr->free_io_qids);
2818 
2819 	nvme_transport_ctrlr_destruct(ctrlr);
2820 }
2821 
2822 int
2823 nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
2824 				struct nvme_request *req)
2825 {
2826 	return nvme_qpair_submit_request(ctrlr->adminq, req);
2827 }
2828 
2829 static void
2830 nvme_keep_alive_completion(void *cb_ctx, const struct spdk_nvme_cpl *cpl)
2831 {
2832 	/* Do nothing */
2833 }
2834 
2835 /*
2836  * Check if we need to send a Keep Alive command.
2837  * Caller must hold ctrlr->ctrlr_lock.
2838  */
2839 static void
2840 nvme_ctrlr_keep_alive(struct spdk_nvme_ctrlr *ctrlr)
2841 {
2842 	uint64_t now;
2843 	struct nvme_request *req;
2844 	struct spdk_nvme_cmd *cmd;
2845 	int rc;
2846 
2847 	now = spdk_get_ticks();
2848 	if (now < ctrlr->next_keep_alive_tick) {
2849 		return;
2850 	}
2851 
2852 	req = nvme_allocate_request_null(ctrlr->adminq, nvme_keep_alive_completion, NULL);
2853 	if (req == NULL) {
2854 		return;
2855 	}
2856 
2857 	cmd = &req->cmd;
2858 	cmd->opc = SPDK_NVME_OPC_KEEP_ALIVE;
2859 
2860 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
2861 	if (rc != 0) {
2862 		SPDK_ERRLOG("Submitting Keep Alive failed\n");
2863 	}
2864 
2865 	ctrlr->next_keep_alive_tick = now + ctrlr->keep_alive_interval_ticks;
2866 }
2867 
2868 int32_t
2869 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
2870 {
2871 	int32_t num_completions;
2872 	int32_t rc;
2873 
2874 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
2875 
2876 	if (ctrlr->keep_alive_interval_ticks) {
2877 		nvme_ctrlr_keep_alive(ctrlr);
2878 	}
2879 
2880 	rc = spdk_nvme_io_msg_process(ctrlr);
2881 	if (rc < 0) {
2882 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2883 		return rc;
2884 	}
2885 	num_completions = rc;
2886 
2887 	rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2888 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
2889 
2890 	if (rc < 0) {
2891 		num_completions = rc;
2892 	} else {
2893 		num_completions += rc;
2894 	}
2895 
2896 	return num_completions;
2897 }
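
/*
 * Usage sketch (added, illustrative): applications typically poll this from
 * their main loop alongside I/O qpair polling, e.g.
 *
 *     rc = spdk_nvme_ctrlr_process_admin_completions(ctrlr);
 *     if (rc < 0) {
 *         // the admin qpair hit a transport error
 *     }
 */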
2898 
2899 const struct spdk_nvme_ctrlr_data *
2900 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
2901 {
2902 	return &ctrlr->cdata;
2903 }
2904 
2905 union spdk_nvme_csts_register
spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
2906 {
2907 	union spdk_nvme_csts_register csts;
2908 
2909 	if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
2910 		csts.raw = 0xFFFFFFFFu;
2911 	}
2912 	return csts;
2913 }
2914 
2915 union spdk_nvme_cap_register
spdk_nvme_ctrlr_get_regs_cap(struct spdk_nvme_ctrlr *ctrlr)
2916 {
2917 	return ctrlr->cap;
2918 }
2919 
2920 union spdk_nvme_vs_register
spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
2921 {
2922 	return ctrlr->vs;
2923 }
2924 
2925 union spdk_nvme_cmbsz_register
spdk_nvme_ctrlr_get_regs_cmbsz(struct spdk_nvme_ctrlr *ctrlr)
2926 {
2927 	union spdk_nvme_cmbsz_register cmbsz;
2928 
2929 	if (nvme_ctrlr_get_cmbsz(ctrlr, &cmbsz)) {
2930 		cmbsz.raw = 0;
2931 	}
2932 
2933 	return cmbsz;
2934 }
2935 
2936 uint32_t
2937 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
2938 {
2939 	return ctrlr->num_ns;
2940 }
2941 
2942 static int32_t
2943 spdk_nvme_ctrlr_active_ns_idx(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2944 {
2945 	int32_t result = -1;
2946 
2947 	if (ctrlr->active_ns_list == NULL || nsid == 0 || nsid > ctrlr->num_ns) {
2948 		return result;
2949 	}
2950 
2951 	int32_t lower = 0;
2952 	int32_t upper = ctrlr->num_ns - 1;
2953 	int32_t mid;
2954 
2955 	while (lower <= upper) {
2956 		mid = lower + (upper - lower) / 2;
2957 		if (ctrlr->active_ns_list[mid] == nsid) {
2958 			result = mid;
2959 			break;
2960 		} else if (ctrlr->active_ns_list[mid] != 0 && ctrlr->active_ns_list[mid] < nsid) {
2961 			lower = mid + 1;
2962 		} else {
2963 			upper = mid - 1;
2964 		}
2968 	}
2969 
2970 	return result;
2971 }
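
/*
 * Note (added): the binary search above relies on active_ns_list being sorted
 * in ascending order with unused tail entries zeroed, matching the layout of
 * the Identify Active Namespace ID list returned by the controller.
 */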
2972 
2973 bool
2974 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2975 {
2976 	return spdk_nvme_ctrlr_active_ns_idx(ctrlr, nsid) != -1;
2977 }
2978 
2979 uint32_t
2980 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
2981 {
2982 	return ctrlr->active_ns_list ? ctrlr->active_ns_list[0] : 0;
2983 }
2984 
2985 uint32_t
2986 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
2987 {
2988 	int32_t nsid_idx = spdk_nvme_ctrlr_active_ns_idx(ctrlr, prev_nsid);
2989 	if (ctrlr->active_ns_list && nsid_idx >= 0 && (uint32_t)nsid_idx < ctrlr->num_ns - 1) {
2990 		return ctrlr->active_ns_list[nsid_idx + 1];
2991 	}
2992 	return 0;
2993 }
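
/*
 * Illustrative usage (added): the two functions above support the canonical
 * active-namespace iteration pattern, e.g.
 *
 *     for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
 *          nsid != 0;
 *          nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
 *         struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
 *         ...
 *     }
 */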
2994 
2995 struct spdk_nvme_ns *
2996 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2997 {
2998 	if (nsid < 1 || nsid > ctrlr->num_ns) {
2999 		return NULL;
3000 	}
3001 
3002 	return &ctrlr->ns[nsid - 1];
3003 }
3004 
3005 struct spdk_pci_device *
3006 spdk_nvme_ctrlr_get_pci_device(struct spdk_nvme_ctrlr *ctrlr)
3007 {
3008 	if (ctrlr == NULL) {
3009 		return NULL;
3010 	}
3011 
3012 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
3013 		return NULL;
3014 	}
3015 
3016 	return nvme_ctrlr_proc_get_devhandle(ctrlr);
3017 }
3018 
3019 uint32_t
3020 spdk_nvme_ctrlr_get_max_xfer_size(const struct spdk_nvme_ctrlr *ctrlr)
3021 {
3022 	return ctrlr->max_xfer_size;
3023 }
3024 
3025 void
3026 spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
3027 				      spdk_nvme_aer_cb aer_cb_fn,
3028 				      void *aer_cb_arg)
3029 {
3030 	struct spdk_nvme_ctrlr_process *active_proc;
3031 
3032 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
3033 
3034 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
3035 	if (active_proc) {
3036 		active_proc->aer_cb_fn = aer_cb_fn;
3037 		active_proc->aer_cb_arg = aer_cb_arg;
3038 	}
3039 
3040 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
3041 }
3042 
3043 void
3044 spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,
3045 		uint64_t timeout_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)
3046 {
3047 	struct spdk_nvme_ctrlr_process	*active_proc;
3048 
3049 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
3050 
3051 	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
3052 	if (active_proc) {
3053 		active_proc->timeout_ticks = timeout_us * spdk_get_ticks_hz() / 1000000ULL;
3054 		active_proc->timeout_cb_fn = cb_fn;
3055 		active_proc->timeout_cb_arg = cb_arg;
3056 	}
3057 
3058 	ctrlr->timeout_enabled = true;
3059 
3060 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
3061 }
3062 
3063 bool
3064 spdk_nvme_ctrlr_is_log_page_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page)
3065 {
3066 	/* No bounds check necessary, since log_page is uint8_t and log_page_supported has 256 entries */
3067 	SPDK_STATIC_ASSERT(sizeof(ctrlr->log_page_supported) == 256, "log_page_supported size mismatch");
3068 	return ctrlr->log_page_supported[log_page];
3069 }
3070 
3071 bool
3072 spdk_nvme_ctrlr_is_feature_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature_code)
3073 {
3074 	/* No bounds check necessary, since feature_code is uint8_t and feature_supported has 256 entries */
3075 	SPDK_STATIC_ASSERT(sizeof(ctrlr->feature_supported) == 256, "feature_supported size mismatch");
3076 	return ctrlr->feature_supported[feature_code];
3077 }
3078 
3079 int
3080 spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
3081 			  struct spdk_nvme_ctrlr_list *payload)
3082 {
3083 	struct nvme_completion_poll_status	*status;
3084 	int					res;
3085 	struct spdk_nvme_ns			*ns;
3086 
3087 	status = calloc(1, sizeof(*status));
3088 	if (!status) {
3089 		SPDK_ERRLOG("Failed to allocate status tracker\n");
3090 		return -ENOMEM;
3091 	}
3092 
3093 	res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload,
3094 				       nvme_completion_poll_cb, status);
3095 	if (res) {
3096 		free(status);
3097 		return res;
3098 	}
3099 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
3100 		SPDK_ERRLOG("spdk_nvme_ctrlr_attach_ns failed!\n");
3101 		if (!status->timed_out) {
3102 			free(status);
3103 		}
3104 		return -ENXIO;
3105 	}
3106 	free(status);
3107 
3108 	res = nvme_ctrlr_identify_active_ns(ctrlr);
3109 	if (res) {
3110 		return res;
3111 	}
3112 
3113 	ns = &ctrlr->ns[nsid - 1];
3114 	return nvme_ns_construct(ns, nsid, ctrlr);
3115 }
3116 
3117 int
3118 spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
3119 			  struct spdk_nvme_ctrlr_list *payload)
3120 {
3121 	struct nvme_completion_poll_status	*status;
3122 	int					res;
3123 	struct spdk_nvme_ns			*ns;
3124 
3125 	status = calloc(1, sizeof(*status));
3126 	if (!status) {
3127 		SPDK_ERRLOG("Failed to allocate status tracker\n");
3128 		return -ENOMEM;
3129 	}
3130 
3131 	res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload,
3132 				       nvme_completion_poll_cb, status);
3133 	if (res) {
3134 		free(status);
3135 		return res;
3136 	}
3137 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
3138 		SPDK_ERRLOG("spdk_nvme_ctrlr_detach_ns failed!\n");
3139 		if (!status->timed_out) {
3140 			free(status);
3141 		}
3142 		return -ENXIO;
3143 	}
3144 	free(status);
3145 
3146 	res = nvme_ctrlr_identify_active_ns(ctrlr);
3147 	if (res) {
3148 		return res;
3149 	}
3150 
3151 	ns = &ctrlr->ns[nsid - 1];
3152 	/* The namespace is now inactive; tear down its local state. */
3153 	nvme_ns_destruct(ns);
3154 
3155 	return 0;
3156 }
3157 
3158 uint32_t
3159 spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload)
3160 {
3161 	struct nvme_completion_poll_status	*status;
3162 	int					res;
3163 	uint32_t				nsid;
3164 	struct spdk_nvme_ns			*ns;
3165 
3166 	status = calloc(1, sizeof(*status));
3167 	if (!status) {
3168 		SPDK_ERRLOG("Failed to allocate status tracker\n");
3169 		return 0;
3170 	}
3171 
3172 	res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, status);
3173 	if (res) {
3174 		free(status);
3175 		return 0;
3176 	}
3177 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
3178 		SPDK_ERRLOG("spdk_nvme_ctrlr_create_ns failed!\n");
3179 		if (!status->timed_out) {
3180 			free(status);
3181 		}
3182 		return 0;
3183 	}
3184 
3185 	nsid = status->cpl.cdw0;
3186 	ns = &ctrlr->ns[nsid - 1];
3187 	free(status);
3188 	/* The new namespace is not attached yet, so construct it as inactive. */
3189 	res = nvme_ns_construct(ns, nsid, ctrlr);
3190 	if (res) {
3191 		return 0;
3192 	}
3193 
3194 	/* Return the namespace ID that was created */
3195 	return nsid;
3196 }
3197 
3198 int
3199 spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
3200 {
3201 	struct nvme_completion_poll_status	*status;
3202 	int					res;
3203 	struct spdk_nvme_ns			*ns;
3204 
3205 	status = calloc(1, sizeof(*status));
3206 	if (!status) {
3207 		SPDK_ERRLOG("Failed to allocate status tracker\n");
3208 		return -ENOMEM;
3209 	}
3210 
3211 	res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, status);
3212 	if (res) {
3213 		free(status);
3214 		return res;
3215 	}
3216 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
3217 		SPDK_ERRLOG("spdk_nvme_ctrlr_delete_ns failed!\n");
3218 		if (!status->timed_out) {
3219 			free(status);
3220 		}
3221 		return -ENXIO;
3222 	}
3223 	free(status);
3224 
3225 	res = nvme_ctrlr_identify_active_ns(ctrlr);
3226 	if (res) {
3227 		return res;
3228 	}
3229 
3230 	ns = &ctrlr->ns[nsid - 1];
3231 	nvme_ns_destruct(ns);
3232 
3233 	return 0;
3234 }
3235 
3236 int
3237 spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
3238 		       struct spdk_nvme_format *format)
3239 {
3240 	struct nvme_completion_poll_status	*status;
3241 	int					res;
3242 
3243 	status = calloc(1, sizeof(*status));
3244 	if (!status) {
3245 		SPDK_ERRLOG("Failed to allocate status tracker\n");
3246 		return -ENOMEM;
3247 	}
3248 
3249 	res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb,
3250 				    status);
3251 	if (res) {
3252 		free(status);
3253 		return res;
3254 	}
3255 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
3256 		SPDK_ERRLOG("spdk_nvme_ctrlr_format failed!\n");
3257 		if (!status->timed_out) {
3258 			free(status);
3259 		}
3260 		return -ENXIO;
3261 	}
3262 	free(status);
3263 
3264 	return spdk_nvme_ctrlr_reset(ctrlr);
3265 }
3266 
3267 int
3268 spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, uint32_t size,
3269 				int slot, enum spdk_nvme_fw_commit_action commit_action, struct spdk_nvme_status *completion_status)
3270 {
3271 	struct spdk_nvme_fw_commit		fw_commit;
3272 	struct nvme_completion_poll_status	*status;
3273 	int					res;
3274 	unsigned int				size_remaining;
3275 	unsigned int				offset;
3276 	unsigned int				transfer;
3277 	void					*p;
3278 
3279 	if (!completion_status) {
3280 		return -EINVAL;
3281 	}
3282 	memset(completion_status, 0, sizeof(struct spdk_nvme_status));
3283 	if (size % 4) {
3284 		SPDK_ERRLOG("spdk_nvme_ctrlr_update_firmware invalid size!\n");
3285 		return -1;
3286 	}
3287 
3288 	/* Currently only SPDK_NVME_FW_COMMIT_REPLACE_IMG
3289 	 * and SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG are supported.
3290 	 */
3291 	if ((commit_action != SPDK_NVME_FW_COMMIT_REPLACE_IMG) &&
3292 	    (commit_action != SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG)) {
3293 		SPDK_ERRLOG("spdk_nvme_ctrlr_update_firmware invalid command!\n");
3294 		return -1;
3295 	}
3296 
3297 	status = calloc(1, sizeof(*status));
3298 	if (!status) {
3299 		SPDK_ERRLOG("Failed to allocate status tracker\n");
3300 		return -ENOMEM;
3301 	}
3302 
3303 	/* Firmware download */
3304 	size_remaining = size;
3305 	offset = 0;
3306 	p = payload;
3307 
3308 	while (size_remaining > 0) {
3309 		transfer = spdk_min(size_remaining, ctrlr->min_page_size);
3310 
3311 		memset(status, 0, sizeof(*status));
3312 		res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
3313 						       nvme_completion_poll_cb,
3314 						       status);
3315 		if (res) {
3316 			free(status);
3317 			return res;
3318 		}
3319 
3320 		if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
3321 			SPDK_ERRLOG("spdk_nvme_ctrlr_fw_image_download failed!\n");
3322 			if (!status->timed_out) {
3323 				free(status);
3324 			}
3325 			return -ENXIO;
3326 		}
3327 		p += transfer;
3328 		offset += transfer;
3329 		size_remaining -= transfer;
3330 	}
3331 
3332 	/* Firmware commit */
3333 	memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
3334 	fw_commit.fs = slot;
3335 	fw_commit.ca = commit_action;
3336 
3337 	memset(status, 0, sizeof(*status));
3338 	res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb,
3339 				       status);
3340 	if (res) {
3341 		free(status);
3342 		return res;
3343 	}
3344 
3345 	res = spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock);
3346 
3347 	memcpy(completion_status, &status->cpl.status, sizeof(struct spdk_nvme_status));
3348 
3349 	if (!status->timed_out) {
3350 		free(status);
3351 	}
3352 
3353 	if (res) {
3354 		if (completion_status->sct != SPDK_NVME_SCT_COMMAND_SPECIFIC ||
3355 		    completion_status->sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) {
3356 			if (completion_status->sct == SPDK_NVME_SCT_COMMAND_SPECIFIC  &&
3357 			    completion_status->sc == SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET) {
3358 				SPDK_NOTICELOG("firmware activation requires conventional reset to be performed. !\n");
3359 			} else {
3360 				SPDK_ERRLOG("nvme_ctrlr_cmd_fw_commit failed!\n");
3361 			}
3362 			return -ENXIO;
3363 		}
3364 	}
3365 
3366 	return spdk_nvme_ctrlr_reset(ctrlr);
3367 }
3368 
3369 int
3370 spdk_nvme_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
3371 {
3372 	int rc, size;
3373 	union spdk_nvme_cmbsz_register cmbsz;
3374 
3375 	cmbsz = spdk_nvme_ctrlr_get_regs_cmbsz(ctrlr);
3376 
3377 	if (cmbsz.bits.rds == 0 || cmbsz.bits.wds == 0) {
3378 		return -ENOTSUP;
3379 	}
3380 
3381 	size = cmbsz.bits.sz * (0x1000 << (cmbsz.bits.szu * 4));
3382 
3383 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
3384 	rc = nvme_transport_ctrlr_reserve_cmb(ctrlr);
3385 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
3386 
3387 	if (rc < 0) {
3388 		return rc;
3389 	}
3390 
3391 	return size;
3392 }
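
/*
 * Worked example (added): with CMBSZ.SZU = 0 the size unit is 4 KiB
 * (0x1000 << 0), so CMBSZ.SZ = 256 yields a 1 MiB CMB; each increment of SZU
 * multiplies the unit by 16 (4 KiB, 64 KiB, 1 MiB, ...).
 */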
3393 
3394 void *
3395 spdk_nvme_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
3396 {
3397 	void *buf;
3398 
3399 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
3400 	buf = nvme_transport_ctrlr_map_cmb(ctrlr, size);
3401 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
3402 
3403 	return buf;
3404 }
3405 
3406 void
3407 spdk_nvme_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
3408 {
3409 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
3410 	nvme_transport_ctrlr_unmap_cmb(ctrlr);
3411 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
3412 }
3413 
3414 bool
3415 spdk_nvme_ctrlr_is_discovery(struct spdk_nvme_ctrlr *ctrlr)
3416 {
3417 	assert(ctrlr);
3418 
3419 	return !strncmp(ctrlr->trid.subnqn, SPDK_NVMF_DISCOVERY_NQN,
3420 			strlen(SPDK_NVMF_DISCOVERY_NQN));
3421 }
3422 
3423 int
3424 spdk_nvme_ctrlr_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
3425 				 uint16_t spsp, uint8_t nssf, void *payload, size_t size)
3426 {
3427 	struct nvme_completion_poll_status	*status;
3428 	int					res;
3429 
3430 	status = calloc(1, sizeof(*status));
3431 	if (!status) {
3432 		SPDK_ERRLOG("Failed to allocate status tracker\n");
3433 		return -ENOMEM;
3434 	}
3435 
3436 	res = spdk_nvme_ctrlr_cmd_security_receive(ctrlr, secp, spsp, nssf, payload, size,
3437 			nvme_completion_poll_cb, status);
3438 	if (res) {
3439 		free(status);
3440 		return res;
3441 	}
3442 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
3443 		SPDK_ERRLOG("spdk_nvme_ctrlr_cmd_security_receive failed!\n");
3444 		if (!status->timed_out) {
3445 			free(status);
3446 		}
3447 		return -ENXIO;
3448 	}
3449 	free(status);
3450 
3451 	return 0;
3452 }
3453 
3454 int
3455 spdk_nvme_ctrlr_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
3456 			      uint16_t spsp, uint8_t nssf, void *payload, size_t size)
3457 {
3458 	struct nvme_completion_poll_status	*status;
3459 	int					res;
3460 
3461 	status = calloc(1, sizeof(*status));
3462 	if (!status) {
3463 		SPDK_ERRLOG("Failed to allocate status tracker\n");
3464 		return -ENOMEM;
3465 	}
3466 
3467 	res = spdk_nvme_ctrlr_cmd_security_send(ctrlr, secp, spsp, nssf, payload, size,
3468 						nvme_completion_poll_cb,
3469 						status);
3470 	if (res) {
3471 		free(status);
3472 		return res;
3473 	}
3474 	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
3475 		SPDK_ERRLOG("spdk_nvme_ctrlr_cmd_security_send failed!\n");
3476 		if (!status->timed_out) {
3477 			free(status);
3478 		}
3479 		return -ENXIO;
3480 	}
3481 
3482 	free(status);
3483 
3484 	return 0;
3485 }
3486 
3487 uint64_t
3488 spdk_nvme_ctrlr_get_flags(struct spdk_nvme_ctrlr *ctrlr)
3489 {
3490 	return ctrlr->flags;
3491 }
3492 
3493 const struct spdk_nvme_transport_id *
3494 spdk_nvme_ctrlr_get_transport_id(struct spdk_nvme_ctrlr *ctrlr)
3495 {
3496 	return &ctrlr->trid;
3497 }
3498 
3499 /* FIXME need to specify max number of iovs */
3500 int
3501 spdk_nvme_map_prps(void *prv, struct spdk_nvme_cmd *cmd, struct iovec *iovs,
3502 		   uint32_t len, size_t mps,
3503 		   void *(*gpa_to_vva)(void *prv, uint64_t addr, uint64_t len))
3504 {
3505 	uint64_t prp1, prp2;
3506 	void *vva;
3507 	uint32_t i;
3508 	uint32_t residue_len, nents;
3509 	uint64_t *prp_list;
3510 	int iovcnt;
3511 
3512 	prp1 = cmd->dptr.prp.prp1;
3513 	prp2 = cmd->dptr.prp.prp2;
3514 
3515 	/* PRP1 may start at an unaligned page address. */
3516 	residue_len = mps - (prp1 % mps);
3517 	residue_len = spdk_min(len, residue_len);
3518 
3519 	vva = gpa_to_vva(prv, prp1, residue_len);
3520 	if (spdk_unlikely(vva == NULL)) {
3521 		SPDK_ERRLOG("GPA to VVA failed\n");
3522 		return -1;
3523 	}
3524 	iovs[0].iov_base = vva;
3525 	iovs[0].iov_len = residue_len;
3526 	len -= residue_len;
3527 
3528 	if (len) {
3529 		if (spdk_unlikely(prp2 == 0)) {
3530 			SPDK_ERRLOG("no PRP2, %d remaining\n", len);
3531 			return -1;
3532 		}
3533 
3534 		if (len <= mps) {
3535 			/* 2 PRP used */
3536 			iovcnt = 2;
3537 			vva = gpa_to_vva(prv, prp2, len);
3538 			if (spdk_unlikely(vva == NULL)) {
3539 				SPDK_ERRLOG("no VVA for %#lx, len%#x\n",
3540 					    prp2, len);
3541 				return -1;
3542 			}
3543 			iovs[1].iov_base = vva;
3544 			iovs[1].iov_len = len;
3545 		} else {
3546 			/* PRP list used */
3547 			nents = (len + mps - 1) / mps;
3548 			vva = gpa_to_vva(prv, prp2, nents * sizeof(*prp_list));
3549 			if (spdk_unlikely(vva == NULL)) {
3550 				SPDK_ERRLOG("no VVA for %#lx, nents=%#x\n",
3551 					    prp2, nents);
3552 				return -1;
3553 			}
3554 			prp_list = vva;
3555 			i = 0;
3556 			while (len != 0) {
3557 				residue_len = spdk_min(len, mps);
3558 				vva = gpa_to_vva(prv, prp_list[i], residue_len);
3559 				if (spdk_unlikely(vva == NULL)) {
3560 					SPDK_ERRLOG("no VVA for %#lx, residue_len=%#x\n",
3561 						    prp_list[i], residue_len);
3562 					return -1;
3563 				}
3564 				iovs[i + 1].iov_base = vva;
3565 				iovs[i + 1].iov_len = residue_len;
3566 				len -= residue_len;
3567 				i++;
3568 			}
3569 			iovcnt = i + 1;
3570 		}
3571 	} else {
3572 		/* 1 PRP used */
3573 		iovcnt = 1;
3574 	}
3575 
3576 	return iovcnt;
3577 }
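
/*
 * Note (added, an assumption given the FIXME above): the caller must size
 * iovs for the worst case, which is one entry for the (possibly unaligned)
 * PRP1 segment plus one entry per remaining mps-sized chunk, i.e. at most
 * (len / mps) + 2 entries.
 */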
3578