xref: /spdk/lib/nvme/nvme_ctrlr.c (revision f43b76508486ba8d7475f1c170cf471de6619b14)
1488570ebSJim Harris /*   SPDX-License-Identifier: BSD-3-Clause
2a6dbe372Spaul luse  *   Copyright (C) 2015 Intel Corporation. All rights reserved.
33a8852d3SEvgeniy Kochetov  *   Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
4113075beSAlexey Marchuk  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
51010fb3aSDaniel Verkamp  */
693933831SDaniel Verkamp 
7b961d9ccSBen Walker #include "spdk/stdinc.h"
8b961d9ccSBen Walker 
91010fb3aSDaniel Verkamp #include "nvme_internal.h"
1011739f3cSBen Walker #include "nvme_io_msg.h"
11b961d9ccSBen Walker 
120dd80395SBen Walker #include "spdk/env.h"
13d81f3dfdSDaniel Verkamp #include "spdk/string.h"
144f4f505cStyler_sun #include "spdk/endian.h"
15aa2970b9SCunyin Chang 
16842ae79aSJacek Kalwas struct nvme_active_ns_ctx;
17842ae79aSJacek Kalwas 
186ce73aa6SDaniel Verkamp static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
191010fb3aSDaniel Verkamp 		struct nvme_async_event_request *aer);
20842ae79aSJacek Kalwas static void nvme_ctrlr_identify_active_ns_async(struct nvme_active_ns_ctx *ctx);
2192bf76c9SChangpeng Liu static int nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns);
22c4d1b7d5SNiklas Cassel static int nvme_ctrlr_identify_ns_iocs_specific_async(struct spdk_nvme_ns *ns);
23a2fdc4ddSChangpeng Liu static int nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns);
24f5ba8a5eSJim Harris static void nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr);
2509acc162SKonrad Sztyber static void nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
2609acc162SKonrad Sztyber 				 uint64_t timeout_in_ms);
271010fb3aSDaniel Verkamp 
28e7602c15SBen Walker static int
29e7602c15SBen Walker nvme_ns_cmp(struct spdk_nvme_ns *ns1, struct spdk_nvme_ns *ns2)
30e7602c15SBen Walker {
31e7602c15SBen Walker 	if (ns1->id < ns2->id) {
32e7602c15SBen Walker 		return -1;
33e7602c15SBen Walker 	} else if (ns1->id > ns2->id) {
34e7602c15SBen Walker 		return 1;
35e7602c15SBen Walker 	} else {
36e7602c15SBen Walker 		return 0;
37e7602c15SBen Walker 	}
38e7602c15SBen Walker }
39e7602c15SBen Walker 
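/*
 * Generate the static red-black tree helpers (insert/remove/find) for the
 * per-controller namespace tree, ordered by namespace ID via nvme_ns_cmp()
 * above. RB_GENERATE_STATIC is the BSD sys/tree.h-style generator macro.
 */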
40e7602c15SBen Walker RB_GENERATE_STATIC(nvme_ns_tree, spdk_nvme_ns, node, nvme_ns_cmp);
41e7602c15SBen Walker 
4209acc162SKonrad Sztyber #define nvme_ctrlr_get_reg_async(ctrlr, reg, sz, cb_fn, cb_arg) \
4309acc162SKonrad Sztyber 	nvme_transport_ctrlr_get_reg_ ## sz ## _async(ctrlr, \
4409acc162SKonrad Sztyber 		offsetof(struct spdk_nvme_registers, reg), cb_fn, cb_arg)
4509acc162SKonrad Sztyber 
465f376485SKonrad Sztyber #define nvme_ctrlr_set_reg_async(ctrlr, reg, sz, val, cb_fn, cb_arg) \
475f376485SKonrad Sztyber 	nvme_transport_ctrlr_set_reg_ ## sz ## _async(ctrlr, \
485f376485SKonrad Sztyber 		offsetof(struct spdk_nvme_registers, reg), val, cb_fn, cb_arg)
495f376485SKonrad Sztyber 
50ccc084f3SKonrad Sztyber #define nvme_ctrlr_get_cc_async(ctrlr, cb_fn, cb_arg) \
51ccc084f3SKonrad Sztyber 	nvme_ctrlr_get_reg_async(ctrlr, cc, 4, cb_fn, cb_arg)
52ccc084f3SKonrad Sztyber 
538da3c166SKonrad Sztyber #define nvme_ctrlr_get_csts_async(ctrlr, cb_fn, cb_arg) \
548da3c166SKonrad Sztyber 	nvme_ctrlr_get_reg_async(ctrlr, csts, 4, cb_fn, cb_arg)
558da3c166SKonrad Sztyber 
569d8251f6SKonrad Sztyber #define nvme_ctrlr_get_cap_async(ctrlr, cb_fn, cb_arg) \
579d8251f6SKonrad Sztyber 	nvme_ctrlr_get_reg_async(ctrlr, cap, 8, cb_fn, cb_arg)
589d8251f6SKonrad Sztyber 
5909acc162SKonrad Sztyber #define nvme_ctrlr_get_vs_async(ctrlr, cb_fn, cb_arg) \
6009acc162SKonrad Sztyber 	nvme_ctrlr_get_reg_async(ctrlr, vs, 4, cb_fn, cb_arg)
6109acc162SKonrad Sztyber 
625f376485SKonrad Sztyber #define nvme_ctrlr_set_cc_async(ctrlr, value, cb_fn, cb_arg) \
635f376485SKonrad Sztyber 	nvme_ctrlr_set_reg_async(ctrlr, cc, 4, value, cb_fn, cb_arg)
645f376485SKonrad Sztyber 
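/*
 * For illustration, these macros paste the register size into the transport
 * helper name, so
 *
 *     nvme_ctrlr_get_cc_async(ctrlr, cb_fn, cb_arg);
 *
 * expands to
 *
 *     nvme_transport_ctrlr_get_reg_4_async(ctrlr,
 *             offsetof(struct spdk_nvme_registers, cc), cb_fn, cb_arg);
 */
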
65179f122cSDaniel Verkamp static int
66179f122cSDaniel Verkamp nvme_ctrlr_get_cc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc)
67179f122cSDaniel Verkamp {
681ffec5d5SDaniel Verkamp 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
69179f122cSDaniel Verkamp 					      &cc->raw);
70179f122cSDaniel Verkamp }
71179f122cSDaniel Verkamp 
72179f122cSDaniel Verkamp static int
73179f122cSDaniel Verkamp nvme_ctrlr_get_csts(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts)
74179f122cSDaniel Verkamp {
751ffec5d5SDaniel Verkamp 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts.raw),
76179f122cSDaniel Verkamp 					      &csts->raw);
77179f122cSDaniel Verkamp }
78179f122cSDaniel Verkamp 
79a5790100SDaniel Verkamp int
80179f122cSDaniel Verkamp nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
81179f122cSDaniel Verkamp {
821ffec5d5SDaniel Verkamp 	return nvme_transport_ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
83179f122cSDaniel Verkamp 					      &cap->raw);
84179f122cSDaniel Verkamp }
85179f122cSDaniel Verkamp 
86f7b58aeaSDaniel Verkamp int
87179f122cSDaniel Verkamp nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs)
88179f122cSDaniel Verkamp {
891ffec5d5SDaniel Verkamp 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, vs.raw),
90179f122cSDaniel Verkamp 					      &vs->raw);
91179f122cSDaniel Verkamp }
92179f122cSDaniel Verkamp 
93993c4a07SBen Walker int
94993c4a07SBen Walker nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz)
95993c4a07SBen Walker {
96993c4a07SBen Walker 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
97993c4a07SBen Walker 					      &cmbsz->raw);
98993c4a07SBen Walker }
99993c4a07SBen Walker 
10048aed8a5SKrishna Kanth Reddy int
10148aed8a5SKrishna Kanth Reddy nvme_ctrlr_get_pmrcap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_pmrcap_register *pmrcap)
10248aed8a5SKrishna Kanth Reddy {
10348aed8a5SKrishna Kanth Reddy 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
10448aed8a5SKrishna Kanth Reddy 					      &pmrcap->raw);
10548aed8a5SKrishna Kanth Reddy }
10648aed8a5SKrishna Kanth Reddy 
107fec55c84SKrishna Kanth Reddy int
108fec55c84SKrishna Kanth Reddy nvme_ctrlr_get_bpinfo(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bpinfo_register *bpinfo)
109fec55c84SKrishna Kanth Reddy {
110fec55c84SKrishna Kanth Reddy 	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, bpinfo.raw),
111fec55c84SKrishna Kanth Reddy 					      &bpinfo->raw);
112fec55c84SKrishna Kanth Reddy }
113fec55c84SKrishna Kanth Reddy 
114fec55c84SKrishna Kanth Reddy int
115fec55c84SKrishna Kanth Reddy nvme_ctrlr_set_bprsel(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bprsel_register *bprsel)
116fec55c84SKrishna Kanth Reddy {
117fec55c84SKrishna Kanth Reddy 	return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, bprsel.raw),
118fec55c84SKrishna Kanth Reddy 					      bprsel->raw);
119fec55c84SKrishna Kanth Reddy }
120fec55c84SKrishna Kanth Reddy 
121fec55c84SKrishna Kanth Reddy int
122fec55c84SKrishna Kanth Reddy nvme_ctrlr_set_bpmbl(struct spdk_nvme_ctrlr *ctrlr, uint64_t bpmbl_value)
123fec55c84SKrishna Kanth Reddy {
124fec55c84SKrishna Kanth Reddy 	return nvme_transport_ctrlr_set_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, bpmbl),
125fec55c84SKrishna Kanth Reddy 					      bpmbl_value);
126fec55c84SKrishna Kanth Reddy }
127fec55c84SKrishna Kanth Reddy 
12802d3d439SMichael Haeuptle static int
12902d3d439SMichael Haeuptle nvme_ctrlr_set_nssr(struct spdk_nvme_ctrlr *ctrlr, uint32_t nssr_value)
13002d3d439SMichael Haeuptle {
13102d3d439SMichael Haeuptle 	return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, nssr),
13202d3d439SMichael Haeuptle 					      nssr_value);
13302d3d439SMichael Haeuptle }
13402d3d439SMichael Haeuptle 
13564563adaSNiklas Cassel bool
13664563adaSNiklas Cassel nvme_ctrlr_multi_iocs_enabled(struct spdk_nvme_ctrlr *ctrlr)
13764563adaSNiklas Cassel {
13864563adaSNiklas Cassel 	return ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS &&
13964563adaSNiklas Cassel 	       ctrlr->opts.command_set == SPDK_NVME_CC_CSS_IOCS;
14064563adaSNiklas Cassel }
14164563adaSNiklas Cassel 
14218450e8bSZiye Yang /* When a field in spdk_nvme_ctrlr_opts is changed and you change this function, please
14318450e8bSZiye Yang  * also update the nvme_ctrl_opts_init function in nvme_ctrlr.c
14418450e8bSZiye Yang  */
1454ad99808SDaniel Verkamp void
146dc2fb2edSGangCao spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
1474ad99808SDaniel Verkamp {
1486f227249SGangCao 	assert(opts);
1496f227249SGangCao 
15018450e8bSZiye Yang 	opts->opts_size = opts_size;
15118450e8bSZiye Yang 
1526f227249SGangCao #define FIELD_OK(field) \
1536f227249SGangCao 	offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size
1546f227249SGangCao 
155771fae3eSZiye Yang #define SET_FIELD(field, value) \
156771fae3eSZiye Yang 	if (offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size) { \
157771fae3eSZiye Yang 		opts->field = value; \
158771fae3eSZiye Yang 	} \
1596f227249SGangCao 
160771fae3eSZiye Yang 	SET_FIELD(num_io_queues, DEFAULT_MAX_IO_QUEUES);
161815ce363SJim Harris 	SET_FIELD(use_cmb_sqs, false);
162771fae3eSZiye Yang 	SET_FIELD(no_shn_notification, false);
1633ab7a1f6SAnkit Kumar 	SET_FIELD(enable_interrupts, false);
164771fae3eSZiye Yang 	SET_FIELD(arb_mechanism, SPDK_NVME_CC_AMS_RR);
165771fae3eSZiye Yang 	SET_FIELD(arbitration_burst, 0);
166771fae3eSZiye Yang 	SET_FIELD(low_priority_weight, 0);
167771fae3eSZiye Yang 	SET_FIELD(medium_priority_weight, 0);
168771fae3eSZiye Yang 	SET_FIELD(high_priority_weight, 0);
169771fae3eSZiye Yang 	SET_FIELD(keep_alive_timeout_ms, MIN_KEEP_ALIVE_TIMEOUT_IN_MS);
170771fae3eSZiye Yang 	SET_FIELD(transport_retry_count, SPDK_NVME_DEFAULT_RETRY_COUNT);
171771fae3eSZiye Yang 	SET_FIELD(io_queue_size, DEFAULT_IO_QUEUE_SIZE);
1726f227249SGangCao 
173cd82151eSBen Walker 	if (nvme_driver_init() == 0) {
1746f227249SGangCao 		if (FIELD_OK(hostnqn)) {
17510575b06SKonrad Sztyber 			nvme_get_default_hostnqn(opts->hostnqn, sizeof(opts->hostnqn));
1764ad99808SDaniel Verkamp 		}
17705cd548cSZiye Yang 
17805cd548cSZiye Yang 		if (FIELD_OK(extended_host_id)) {
17905cd548cSZiye Yang 			memcpy(opts->extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
18005cd548cSZiye Yang 			       sizeof(opts->extended_host_id));
18105cd548cSZiye Yang 		}
18205cd548cSZiye Yang 
18305cd548cSZiye Yang 	}
18405cd548cSZiye Yang 
185771fae3eSZiye Yang 	SET_FIELD(io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
1864ad99808SDaniel Verkamp 
1876f227249SGangCao 	if (FIELD_OK(src_addr)) {
1886f227249SGangCao 		memset(opts->src_addr, 0, sizeof(opts->src_addr));
1896f227249SGangCao 	}
1906f227249SGangCao 
1916f227249SGangCao 	if (FIELD_OK(src_svcid)) {
1926f227249SGangCao 		memset(opts->src_svcid, 0, sizeof(opts->src_svcid));
1936f227249SGangCao 	}
19499c8c6d8SBen Walker 
19505cd548cSZiye Yang 	if (FIELD_OK(host_id)) {
19605cd548cSZiye Yang 		memset(opts->host_id, 0, sizeof(opts->host_id));
19705cd548cSZiye Yang 	}
19805cd548cSZiye Yang 
199771fae3eSZiye Yang 	SET_FIELD(command_set, CHAR_BIT);
200771fae3eSZiye Yang 	SET_FIELD(admin_timeout_ms, NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000);
201771fae3eSZiye Yang 	SET_FIELD(header_digest, false);
202771fae3eSZiye Yang 	SET_FIELD(data_digest, false);
203771fae3eSZiye Yang 	SET_FIELD(disable_error_logging, false);
204771fae3eSZiye Yang 	SET_FIELD(transport_ack_timeout, SPDK_NVME_DEFAULT_TRANSPORT_ACK_TIMEOUT);
205771fae3eSZiye Yang 	SET_FIELD(admin_queue_size, DEFAULT_ADMIN_QUEUE_SIZE);
206771fae3eSZiye Yang 	SET_FIELD(fabrics_connect_timeout_us, NVME_FABRIC_CONNECT_COMMAND_TIMEOUT);
207e0715c2aSShuhei Matsumoto 	SET_FIELD(disable_read_ana_log_page, false);
208b801af09SJim Harris 	SET_FIELD(disable_read_changed_ns_list_log_page, false);
209a6e805f5SKonrad Sztyber 	SET_FIELD(tls_psk, NULL);
210fc8dece0SKonrad Sztyber 	SET_FIELD(dhchap_key, NULL);
211dbaa0488SKonrad Sztyber 	SET_FIELD(dhchap_ctrlr_key, NULL);
212ab93bb4eSKonrad Sztyber 	SET_FIELD(dhchap_digests,
213ab93bb4eSKonrad Sztyber 		  SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA256) |
214ab93bb4eSKonrad Sztyber 		  SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA384) |
215ab93bb4eSKonrad Sztyber 		  SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA512));
216ab93bb4eSKonrad Sztyber 	SET_FIELD(dhchap_dhgroups,
217ab93bb4eSKonrad Sztyber 		  SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_NULL) |
218ab93bb4eSKonrad Sztyber 		  SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_2048) |
219ab93bb4eSKonrad Sztyber 		  SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_3072) |
220ab93bb4eSKonrad Sztyber 		  SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_4096) |
221ab93bb4eSKonrad Sztyber 		  SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_6144) |
222ab93bb4eSKonrad Sztyber 		  SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_8192));
2236f227249SGangCao #undef FIELD_OK
224771fae3eSZiye Yang #undef SET_FIELD
2256f227249SGangCao }
2266f227249SGangCao 
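/*
 * Example (hypothetical caller, not part of this file): callers pass the size
 * of their copy of the opts struct, so newer fields are only written when the
 * caller's struct is large enough, preserving ABI compatibility:
 *
 *     struct spdk_nvme_ctrlr_opts opts;
 *
 *     spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
 *     opts.num_io_queues = 4;
 *     // hand &opts and sizeof(opts) to the probe/connect API of choice
 */
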
2273875f287SBen Walker const struct spdk_nvme_ctrlr_opts *
2283875f287SBen Walker spdk_nvme_ctrlr_get_opts(struct spdk_nvme_ctrlr *ctrlr)
2293875f287SBen Walker {
2303875f287SBen Walker 	return &ctrlr->opts;
2313875f287SBen Walker }
2323875f287SBen Walker 
233bb726d51SGangCao /**
234bb726d51SGangCao  * This function will be called when the process allocates the IO qpair.
235bb726d51SGangCao  * Note: the ctrlr_lock must be held when calling this function.
236bb726d51SGangCao  */
237bb726d51SGangCao static void
238bb726d51SGangCao nvme_ctrlr_proc_add_io_qpair(struct spdk_nvme_qpair *qpair)
239bb726d51SGangCao {
240bb726d51SGangCao 	struct spdk_nvme_ctrlr_process	*active_proc;
241bb726d51SGangCao 	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
242bb726d51SGangCao 
2431a9c19a9SSeth Howell 	active_proc = nvme_ctrlr_get_current_process(ctrlr);
244cbd9c241SDaniel Verkamp 	if (active_proc) {
245cbd9c241SDaniel Verkamp 		TAILQ_INSERT_TAIL(&active_proc->allocated_io_qpairs, qpair, per_process_tailq);
24631bf5d79SZiye Yang 		qpair->active_proc = active_proc;
247bb726d51SGangCao 	}
248bb726d51SGangCao }
249bb726d51SGangCao 
250bb726d51SGangCao /**
251bb726d51SGangCao  * This function will be called when the process frees the IO qpair.
252bb726d51SGangCao  * Note: the ctrlr_lock must be held when calling this function.
253bb726d51SGangCao  */
254bb726d51SGangCao static void
255bb726d51SGangCao nvme_ctrlr_proc_remove_io_qpair(struct spdk_nvme_qpair *qpair)
256bb726d51SGangCao {
257bb726d51SGangCao 	struct spdk_nvme_ctrlr_process	*active_proc;
258bb726d51SGangCao 	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
25948820a5eSGangCao 	struct spdk_nvme_qpair          *active_qpair, *tmp_qpair;
260bb726d51SGangCao 
2611a9c19a9SSeth Howell 	active_proc = nvme_ctrlr_get_current_process(ctrlr);
262cbd9c241SDaniel Verkamp 	if (!active_proc) {
26348820a5eSGangCao 		return;
26448820a5eSGangCao 	}
26548820a5eSGangCao 
26648820a5eSGangCao 	TAILQ_FOREACH_SAFE(active_qpair, &active_proc->allocated_io_qpairs,
26748820a5eSGangCao 			   per_process_tailq, tmp_qpair) {
26848820a5eSGangCao 		if (active_qpair == qpair) {
26948820a5eSGangCao 			TAILQ_REMOVE(&active_proc->allocated_io_qpairs,
27048820a5eSGangCao 				     active_qpair, per_process_tailq);
27148820a5eSGangCao 
272bb726d51SGangCao 			break;
273bb726d51SGangCao 		}
274bb726d51SGangCao 	}
275bb726d51SGangCao }
276bb726d51SGangCao 
277ce4fcbceSDaniel Verkamp void
278ce4fcbceSDaniel Verkamp spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
279ce4fcbceSDaniel Verkamp 		struct spdk_nvme_io_qpair_opts *opts,
280ce4fcbceSDaniel Verkamp 		size_t opts_size)
281ce4fcbceSDaniel Verkamp {
2823d0f1168SGangCao 	assert(ctrlr);
2833d0f1168SGangCao 
2843d0f1168SGangCao 	assert(opts);
285ce4fcbceSDaniel Verkamp 
286ce4fcbceSDaniel Verkamp 	memset(opts, 0, opts_size);
287*f43b7650SAnkit Kumar 	opts->opts_size = opts_size;
288ce4fcbceSDaniel Verkamp 
289ce4fcbceSDaniel Verkamp #define FIELD_OK(field) \
290ce4fcbceSDaniel Verkamp 	offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(opts->field) <= opts_size
291ce4fcbceSDaniel Verkamp 
292*f43b7650SAnkit Kumar #define SET_FIELD(field, value) \
293*f43b7650SAnkit Kumar         if (FIELD_OK(field)) { \
294*f43b7650SAnkit Kumar                 opts->field = value; \
295*f43b7650SAnkit Kumar         } \
296ce4fcbceSDaniel Verkamp 
297*f43b7650SAnkit Kumar 	SET_FIELD(qprio, SPDK_NVME_QPRIO_URGENT);
298*f43b7650SAnkit Kumar 	SET_FIELD(io_queue_size, ctrlr->opts.io_queue_size);
299*f43b7650SAnkit Kumar 	SET_FIELD(io_queue_requests, ctrlr->opts.io_queue_requests);
300*f43b7650SAnkit Kumar 	SET_FIELD(delay_cmd_submit, false);
301*f43b7650SAnkit Kumar 	SET_FIELD(sq.vaddr, NULL);
302*f43b7650SAnkit Kumar 	SET_FIELD(sq.paddr, 0);
303*f43b7650SAnkit Kumar 	SET_FIELD(sq.buffer_size, 0);
304*f43b7650SAnkit Kumar 	SET_FIELD(cq.vaddr, NULL);
305*f43b7650SAnkit Kumar 	SET_FIELD(cq.paddr, 0);
306*f43b7650SAnkit Kumar 	SET_FIELD(cq.buffer_size, 0);
307*f43b7650SAnkit Kumar 	SET_FIELD(create_only, false);
308*f43b7650SAnkit Kumar 	SET_FIELD(async_mode, false);
309*f43b7650SAnkit Kumar 	SET_FIELD(disable_pcie_sgl_merge, false);
310771f65bbSMonica Kenguva 
311ce4fcbceSDaniel Verkamp #undef FIELD_OK
312*f43b7650SAnkit Kumar #undef SET_FIELD
313*f43b7650SAnkit Kumar }
314*f43b7650SAnkit Kumar 
315*f43b7650SAnkit Kumar static void
316*f43b7650SAnkit Kumar nvme_ctrlr_io_qpair_opts_copy(struct spdk_nvme_io_qpair_opts *dst,
317*f43b7650SAnkit Kumar 			      const struct spdk_nvme_io_qpair_opts *src, size_t opts_size_src)
318*f43b7650SAnkit Kumar {
319*f43b7650SAnkit Kumar 	if (!opts_size_src) {
319*f43b7650SAnkit Kumar 		SPDK_ERRLOG("opts_size_src should not be zero\n");
321*f43b7650SAnkit Kumar 		assert(false);
322*f43b7650SAnkit Kumar 	}
323*f43b7650SAnkit Kumar 
324*f43b7650SAnkit Kumar #define FIELD_OK(field) \
325*f43b7650SAnkit Kumar         offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(src->field) <= opts_size_src
326*f43b7650SAnkit Kumar 
327*f43b7650SAnkit Kumar #define SET_FIELD(field) \
328*f43b7650SAnkit Kumar         if (FIELD_OK(field)) { \
329*f43b7650SAnkit Kumar                 dst->field = src->field; \
330*f43b7650SAnkit Kumar         } \
331*f43b7650SAnkit Kumar 
332*f43b7650SAnkit Kumar 	SET_FIELD(qprio);
333*f43b7650SAnkit Kumar 	SET_FIELD(io_queue_size);
334*f43b7650SAnkit Kumar 	SET_FIELD(io_queue_requests);
335*f43b7650SAnkit Kumar 	SET_FIELD(delay_cmd_submit);
336*f43b7650SAnkit Kumar 	SET_FIELD(sq.vaddr);
337*f43b7650SAnkit Kumar 	SET_FIELD(sq.paddr);
338*f43b7650SAnkit Kumar 	SET_FIELD(sq.buffer_size);
339*f43b7650SAnkit Kumar 	SET_FIELD(cq.vaddr);
340*f43b7650SAnkit Kumar 	SET_FIELD(cq.paddr);
341*f43b7650SAnkit Kumar 	SET_FIELD(cq.buffer_size);
342*f43b7650SAnkit Kumar 	SET_FIELD(create_only);
343*f43b7650SAnkit Kumar 	SET_FIELD(async_mode);
344*f43b7650SAnkit Kumar 	SET_FIELD(disable_pcie_sgl_merge);
345*f43b7650SAnkit Kumar 
346*f43b7650SAnkit Kumar 	dst->opts_size = opts_size_src;
347*f43b7650SAnkit Kumar 
348*f43b7650SAnkit Kumar 	/* Do not remove this statement. If you add a new field, update the expected
349*f43b7650SAnkit Kumar 	 * size in this assert and add a corresponding SET_FIELD statement above. */
350*f43b7650SAnkit Kumar 	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_io_qpair_opts) == 80, "Incorrect size");
351*f43b7650SAnkit Kumar 
352*f43b7650SAnkit Kumar #undef FIELD_OK
353*f43b7650SAnkit Kumar #undef SET_FIELD
354ce4fcbceSDaniel Verkamp }
355ce4fcbceSDaniel Verkamp 
3567b4558e3SSeth Howell static struct spdk_nvme_qpair *
3577b4558e3SSeth Howell nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
3587b4558e3SSeth Howell 			   const struct spdk_nvme_io_qpair_opts *opts)
3597b4558e3SSeth Howell {
3604e06de69SJacek Kalwas 	int32_t					qid;
3617b4558e3SSeth Howell 	struct spdk_nvme_qpair			*qpair;
3627b4558e3SSeth Howell 	union spdk_nvme_cc_register		cc;
3637b4558e3SSeth Howell 
3647b4558e3SSeth Howell 	if (!ctrlr) {
3657b4558e3SSeth Howell 		return NULL;
3667b4558e3SSeth Howell 	}
3677b4558e3SSeth Howell 
368e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
3690825befaSKonrad Sztyber 	cc.raw = ctrlr->process_init_cc.raw;
3707b4558e3SSeth Howell 
3717b4558e3SSeth Howell 	if (opts->qprio & ~SPDK_NVME_CREATE_IO_SQ_QPRIO_MASK) {
372e10b4806SJim Harris 		nvme_ctrlr_unlock(ctrlr);
3737b4558e3SSeth Howell 		return NULL;
3747b4558e3SSeth Howell 	}
3757b4558e3SSeth Howell 
3767b4558e3SSeth Howell 	/*
3767b4558e3SSeth Howell 	 * Only the value SPDK_NVME_QPRIO_URGENT (0) is valid for the
3787b4558e3SSeth Howell 	 * default round robin arbitration method.
3797b4558e3SSeth Howell 	 */
3807b4558e3SSeth Howell 	if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts->qprio != SPDK_NVME_QPRIO_URGENT)) {
38101f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "invalid queue priority for default round robin arbitration method\n");
382e10b4806SJim Harris 		nvme_ctrlr_unlock(ctrlr);
3837b4558e3SSeth Howell 		return NULL;
3847b4558e3SSeth Howell 	}
3857b4558e3SSeth Howell 
3864e06de69SJacek Kalwas 	qid = spdk_nvme_ctrlr_alloc_qid(ctrlr);
3874e06de69SJacek Kalwas 	if (qid < 0) {
388e10b4806SJim Harris 		nvme_ctrlr_unlock(ctrlr);
3897b4558e3SSeth Howell 		return NULL;
3907b4558e3SSeth Howell 	}
3917b4558e3SSeth Howell 
3927b4558e3SSeth Howell 	qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, opts);
3937b4558e3SSeth Howell 	if (qpair == NULL) {
39401f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "nvme_transport_ctrlr_create_io_qpair() failed\n");
3954e06de69SJacek Kalwas 		spdk_nvme_ctrlr_free_qid(ctrlr, qid);
396e10b4806SJim Harris 		nvme_ctrlr_unlock(ctrlr);
3977b4558e3SSeth Howell 		return NULL;
3987b4558e3SSeth Howell 	}
3997b4558e3SSeth Howell 
4007b4558e3SSeth Howell 	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
4017b4558e3SSeth Howell 
4027b4558e3SSeth Howell 	nvme_ctrlr_proc_add_io_qpair(qpair);
4037b4558e3SSeth Howell 
404e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
4057b4558e3SSeth Howell 
4067b4558e3SSeth Howell 	return qpair;
4077b4558e3SSeth Howell }
4087b4558e3SSeth Howell 
4097b4558e3SSeth Howell int
4107b4558e3SSeth Howell spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
4117b4558e3SSeth Howell {
4127b4558e3SSeth Howell 	int rc;
4137b4558e3SSeth Howell 
4149649ee09SSeth Howell 	if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTED) {
4157b4558e3SSeth Howell 		return -EISCONN;
4167b4558e3SSeth Howell 	}
4177b4558e3SSeth Howell 
418e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
4197b4558e3SSeth Howell 	rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
420e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
4217b4558e3SSeth Howell 
4227b4558e3SSeth Howell 	if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC) {
4237b4558e3SSeth Howell 		spdk_delay_us(100);
4247b4558e3SSeth Howell 	}
4257b4558e3SSeth Howell 
4267b4558e3SSeth Howell 	return rc;
4277b4558e3SSeth Howell }
4287b4558e3SSeth Howell 
4295d9d52fdSSeth Howell void
4305d9d52fdSSeth Howell spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
4315d9d52fdSSeth Howell {
4325d9d52fdSSeth Howell 	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
4335d9d52fdSSeth Howell 
434e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
4355d9d52fdSSeth Howell 	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
436e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
4375d9d52fdSSeth Howell }
4385d9d52fdSSeth Howell 
43928b353a5SAnkit Kumar int
44028b353a5SAnkit Kumar spdk_nvme_ctrlr_get_admin_qp_fd(struct spdk_nvme_ctrlr *ctrlr,
44128b353a5SAnkit Kumar 				struct spdk_event_handler_opts *opts)
44228b353a5SAnkit Kumar {
44328b353a5SAnkit Kumar 	return spdk_nvme_qpair_get_fd(ctrlr->adminq, opts);
44428b353a5SAnkit Kumar }
44528b353a5SAnkit Kumar 
4463272320cSDaniel Verkamp struct spdk_nvme_qpair *
4473272320cSDaniel Verkamp spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
448ce4fcbceSDaniel Verkamp 			       const struct spdk_nvme_io_qpair_opts *user_opts,
449ce4fcbceSDaniel Verkamp 			       size_t opts_size)
4503272320cSDaniel Verkamp {
451ce4fcbceSDaniel Verkamp 
452af130056SAlexey Marchuk 	struct spdk_nvme_qpair		*qpair = NULL;
4537b4558e3SSeth Howell 	struct spdk_nvme_io_qpair_opts	opts;
4547b4558e3SSeth Howell 	int				rc;
455ce4fcbceSDaniel Verkamp 
456e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
457af130056SAlexey Marchuk 
4583c4a68caSAlexey Marchuk 	if (spdk_unlikely(ctrlr->state != NVME_CTRLR_STATE_READY)) {
4593c4a68caSAlexey Marchuk 		/* When the controller is resetting or initializing, free_io_qids has been deleted
4603c4a68caSAlexey Marchuk 		 * or not yet created. We can't create an I/O qpair in that case. */
461af130056SAlexey Marchuk 		goto unlock;
4623c4a68caSAlexey Marchuk 	}
4633c4a68caSAlexey Marchuk 
464ce4fcbceSDaniel Verkamp 	/*
465ce4fcbceSDaniel Verkamp 	 * Get the default options, then overwrite them with the user-provided options
466ce4fcbceSDaniel Verkamp 	 * up to opts_size.
467ce4fcbceSDaniel Verkamp 	 *
468ce4fcbceSDaniel Verkamp 	 * This allows for extensions of the opts structure without breaking
469ce4fcbceSDaniel Verkamp 	 * ABI compatibility.
470ce4fcbceSDaniel Verkamp 	 */
471ce4fcbceSDaniel Verkamp 	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
472ce4fcbceSDaniel Verkamp 	if (user_opts) {
473*f43b7650SAnkit Kumar 		nvme_ctrlr_io_qpair_opts_copy(&opts, user_opts, spdk_min(opts.opts_size, opts_size));
4748785d505SJames Bergsten 
4758785d505SJames Bergsten 		/* If user passes buffers, make sure they're big enough for the requested queue size */
4768785d505SJames Bergsten 		if (opts.sq.vaddr) {
4778785d505SJames Bergsten 			if (opts.sq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cmd))) {
47801f45ecdSGangCao 				NVME_CTRLR_ERRLOG(ctrlr, "sq buffer size %" PRIx64 " is too small for sq size %zx\n",
4798785d505SJames Bergsten 						  opts.sq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cmd)));
480af130056SAlexey Marchuk 				goto unlock;
4818785d505SJames Bergsten 			}
4828785d505SJames Bergsten 		}
4838785d505SJames Bergsten 		if (opts.cq.vaddr) {
4848785d505SJames Bergsten 			if (opts.cq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cpl))) {
48501f45ecdSGangCao 				NVME_CTRLR_ERRLOG(ctrlr, "cq buffer size %" PRIx64 " is too small for cq size %zx\n",
4868785d505SJames Bergsten 						  opts.cq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cpl)));
487af130056SAlexey Marchuk 				goto unlock;
4888785d505SJames Bergsten 			}
4898785d505SJames Bergsten 		}
490ce4fcbceSDaniel Verkamp 	}
4917991eb19SZiye Yang 
4923ab7a1f6SAnkit Kumar 	if (ctrlr->opts.enable_interrupts && opts.delay_cmd_submit) {
4933ab7a1f6SAnkit Kumar 		NVME_CTRLR_ERRLOG(ctrlr, "delay command submit cannot work with interrupts\n");
4943ab7a1f6SAnkit Kumar 		goto unlock;
4953ab7a1f6SAnkit Kumar 	}
4963ab7a1f6SAnkit Kumar 
4977b4558e3SSeth Howell 	qpair = nvme_ctrlr_create_io_qpair(ctrlr, &opts);
4987b4558e3SSeth Howell 
4997b4558e3SSeth Howell 	if (qpair == NULL || opts.create_only == true) {
500af130056SAlexey Marchuk 		goto unlock;
501179f122cSDaniel Verkamp 	}
5023272320cSDaniel Verkamp 
5037b4558e3SSeth Howell 	rc = spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);
5043b99ee99SSeth Howell 	if (rc != 0) {
50501f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "nvme_transport_ctrlr_connect_io_qpair() failed\n");
506d92c2f11SVasuki Manikarnike 		nvme_ctrlr_proc_remove_io_qpair(qpair);
507ce83fc2aSSeth Howell 		TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
508d92c2f11SVasuki Manikarnike 		spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);
5093b99ee99SSeth Howell 		nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair);
510af130056SAlexey Marchuk 		qpair = NULL;
511af130056SAlexey Marchuk 		goto unlock;
5123b99ee99SSeth Howell 	}
51313f8cf15SJim Harris 
514af130056SAlexey Marchuk unlock:
515e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
516af130056SAlexey Marchuk 
5173272320cSDaniel Verkamp 	return qpair;
5183272320cSDaniel Verkamp }
5193272320cSDaniel Verkamp 
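/*
 * Example (hypothetical caller): a sketch of how the create_only option splits
 * allocation from connection, using spdk_nvme_ctrlr_connect_io_qpair() defined
 * above:
 *
 *     struct spdk_nvme_io_qpair_opts qopts;
 *     struct spdk_nvme_qpair *qpair;
 *
 *     spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &qopts, sizeof(qopts));
 *     qopts.create_only = true;
 *     qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &qopts, sizeof(qopts));
 *     if (qpair != NULL && spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair) != 0) {
 *             spdk_nvme_ctrlr_free_io_qpair(qpair);
 *     }
 */
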
5203272320cSDaniel Verkamp int
521e45b619cSSeth Howell spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
522e45b619cSSeth Howell {
523e45b619cSSeth Howell 	struct spdk_nvme_ctrlr *ctrlr;
524c9bc86c0SSeth Howell 	enum nvme_qpair_state qpair_state;
525e45b619cSSeth Howell 	int rc;
526e45b619cSSeth Howell 
527e45b619cSSeth Howell 	assert(qpair != NULL);
528e45b619cSSeth Howell 	assert(nvme_qpair_is_admin_queue(qpair) == false);
529e45b619cSSeth Howell 	assert(qpair->ctrlr != NULL);
530e45b619cSSeth Howell 
531e45b619cSSeth Howell 	ctrlr = qpair->ctrlr;
532e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
533c9bc86c0SSeth Howell 	qpair_state = nvme_qpair_get_state(qpair);
534e45b619cSSeth Howell 
535e45b619cSSeth Howell 	if (ctrlr->is_removed) {
536e45b619cSSeth Howell 		rc = -ENODEV;
537e45b619cSSeth Howell 		goto out;
538e45b619cSSeth Howell 	}
539e45b619cSSeth Howell 
540c9bc86c0SSeth Howell 	if (ctrlr->is_resetting || qpair_state == NVME_QPAIR_DISCONNECTING) {
541e45b619cSSeth Howell 		rc = -EAGAIN;
542e45b619cSSeth Howell 		goto out;
543e45b619cSSeth Howell 	}
544e45b619cSSeth Howell 
545c9bc86c0SSeth Howell 	if (ctrlr->is_failed || qpair_state == NVME_QPAIR_DESTROYING) {
546e45b619cSSeth Howell 		rc = -ENXIO;
547e45b619cSSeth Howell 		goto out;
548e45b619cSSeth Howell 	}
549e45b619cSSeth Howell 
550c9bc86c0SSeth Howell 	if (qpair_state != NVME_QPAIR_DISCONNECTED) {
551e45b619cSSeth Howell 		rc = 0;
552e45b619cSSeth Howell 		goto out;
553e45b619cSSeth Howell 	}
554e45b619cSSeth Howell 
555e45b619cSSeth Howell 	rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
556e45b619cSSeth Howell 	if (rc) {
557e45b619cSSeth Howell 		rc = -EAGAIN;
558e45b619cSSeth Howell 		goto out;
559e45b619cSSeth Howell 	}
560e45b619cSSeth Howell 
561e45b619cSSeth Howell out:
562e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
563e45b619cSSeth Howell 	return rc;
564e45b619cSSeth Howell }
565e45b619cSSeth Howell 
56624bca2eaSSeth Howell spdk_nvme_qp_failure_reason
56724bca2eaSSeth Howell spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
56824bca2eaSSeth Howell {
56924bca2eaSSeth Howell 	return ctrlr->adminq->transport_failure_reason;
57024bca2eaSSeth Howell }
57124bca2eaSSeth Howell 
572a4925ba7SSeth Howell /*
573a4925ba7SSeth Howell  * This internal function will attempt to take the controller
574a4925ba7SSeth Howell  * lock before calling disconnect on a controller qpair.
575a4925ba7SSeth Howell  * Functions already holding the controller lock should
576a4925ba7SSeth Howell  * call nvme_transport_ctrlr_disconnect_qpair directly.
577a4925ba7SSeth Howell  */
578a4925ba7SSeth Howell void
579a4925ba7SSeth Howell nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair)
580a4925ba7SSeth Howell {
581a4925ba7SSeth Howell 	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
582a4925ba7SSeth Howell 
583a4925ba7SSeth Howell 	assert(ctrlr != NULL);
584e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
585a4925ba7SSeth Howell 	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
586e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
587a4925ba7SSeth Howell }
588a4925ba7SSeth Howell 
589e45b619cSSeth Howell int
5903272320cSDaniel Verkamp spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
5913272320cSDaniel Verkamp {
5923272320cSDaniel Verkamp 	struct spdk_nvme_ctrlr *ctrlr;
5933272320cSDaniel Verkamp 
5943272320cSDaniel Verkamp 	if (qpair == NULL) {
5953272320cSDaniel Verkamp 		return 0;
5963272320cSDaniel Verkamp 	}
5973272320cSDaniel Verkamp 
5983272320cSDaniel Verkamp 	ctrlr = qpair->ctrlr;
5993272320cSDaniel Verkamp 
60037ccb50cSJim Harris 	if (qpair->in_completion_context) {
60137ccb50cSJim Harris 		/*
60237ccb50cSJim Harris 		 * There are many cases where it is convenient to delete an io qpair in the context
60337ccb50cSJim Harris 		 *  of that qpair's completion routine.  To handle this properly, set a flag here
60437ccb50cSJim Harris 		 *  so that the completion routine will perform an actual delete after the context
60537ccb50cSJim Harris 		 *  unwinds.
60637ccb50cSJim Harris 		 */
60737ccb50cSJim Harris 		qpair->delete_after_completion_context = 1;
60837ccb50cSJim Harris 		return 0;
60937ccb50cSJim Harris 	}
61037ccb50cSJim Harris 
6114ddd77b2SKonrad Sztyber 	if (qpair->auth.cb_fn != NULL) {
6124ddd77b2SKonrad Sztyber 		qpair->auth.cb_fn(qpair->auth.cb_ctx, -ECANCELED);
6134ddd77b2SKonrad Sztyber 		qpair->auth.cb_fn = NULL;
6144ddd77b2SKonrad Sztyber 	}
6154ddd77b2SKonrad Sztyber 
616113075beSAlexey Marchuk 	qpair->destroy_in_progress = 1;
617113075beSAlexey Marchuk 
618e021cc01SShuhei Matsumoto 	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
619e021cc01SShuhei Matsumoto 
620ac31590bSChangpeng Liu 	if (qpair->poll_group && (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr))) {
621b9a18797SSeth Howell 		spdk_nvme_poll_group_remove(qpair->poll_group->group, qpair);
622b9a18797SSeth Howell 	}
623b9a18797SSeth Howell 
6249fe50848SSeth Howell 	/* Do not retry. */
6257defb70dSSeth Howell 	nvme_qpair_set_state(qpair, NVME_QPAIR_DESTROYING);
626751e2812SJim Harris 
627751e2812SJim Harris 	/* In the multi-process case, a process may call this function on a foreign
628751e2812SJim Harris 	 * I/O qpair (i.e. one that this process did not create) when that qpair's process
629751e2812SJim Harris 	 * exits unexpectedly.  In that case, we must not try to abort any reqs associated
630751e2812SJim Harris 	 * with that qpair, since the callbacks will also be foreign to this process.
631751e2812SJim Harris 	 */
632751e2812SJim Harris 	if (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr)) {
63349d3a5e4SShuhei Matsumoto 		nvme_qpair_abort_all_queued_reqs(qpair);
634751e2812SJim Harris 	}
635751e2812SJim Harris 
636e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
6373272320cSDaniel Verkamp 
638bb726d51SGangCao 	nvme_ctrlr_proc_remove_io_qpair(qpair);
639bb726d51SGangCao 
640c194ebd8SDaniel Verkamp 	TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
6414e06de69SJacek Kalwas 	spdk_nvme_ctrlr_free_qid(ctrlr, qpair->id);
642c194ebd8SDaniel Verkamp 
6434246e79cSJim Harris 	nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair);
644e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
6453272320cSDaniel Verkamp 	return 0;
6463272320cSDaniel Verkamp }
6473272320cSDaniel Verkamp 
6489945c00cSCunyin Chang static void
6496ce73aa6SDaniel Verkamp nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
6508332f9e4SDaniel Verkamp 		struct spdk_nvme_intel_log_page_directory *log_page_directory)
6519945c00cSCunyin Chang {
652a00852c1SDaniel Verkamp 	if (log_page_directory == NULL) {
6539945c00cSCunyin Chang 		return;
654a00852c1SDaniel Verkamp 	}
6559945c00cSCunyin Chang 
656988ce2ecSJim Harris 	assert(ctrlr->cdata.vid == SPDK_PCI_VID_INTEL);
657aa2970b9SCunyin Chang 
6588332f9e4SDaniel Verkamp 	ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY] = true;
6599945c00cSCunyin Chang 
660aa2970b9SCunyin Chang 	if (log_page_directory->read_latency_log_len ||
6618bf37ee7SWenbo Wang 	    (ctrlr->quirks & NVME_INTEL_QUIRK_READ_LATENCY)) {
6628332f9e4SDaniel Verkamp 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] = true;
6639945c00cSCunyin Chang 	}
664aa2970b9SCunyin Chang 	if (log_page_directory->write_latency_log_len ||
6658bf37ee7SWenbo Wang 	    (ctrlr->quirks & NVME_INTEL_QUIRK_WRITE_LATENCY)) {
6668332f9e4SDaniel Verkamp 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] = true;
6679945c00cSCunyin Chang 	}
6689945c00cSCunyin Chang 	if (log_page_directory->temperature_statistics_log_len) {
6698332f9e4SDaniel Verkamp 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] = true;
6709945c00cSCunyin Chang 	}
6719945c00cSCunyin Chang 	if (log_page_directory->smart_log_len) {
6728332f9e4SDaniel Verkamp 		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_SMART] = true;
6739945c00cSCunyin Chang 	}
67410f0f4b0SCunyin Chang 	if (log_page_directory->marketing_description_log_len) {
67510f0f4b0SCunyin Chang 		ctrlr->log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] = true;
67610f0f4b0SCunyin Chang 	}
6779945c00cSCunyin Chang }
6789945c00cSCunyin Chang 
679632c8d56SChangpeng Liu struct intel_log_pages_ctx {
680632c8d56SChangpeng Liu 	struct spdk_nvme_intel_log_page_directory log_page_directory;
681632c8d56SChangpeng Liu 	struct spdk_nvme_ctrlr *ctrlr;
682632c8d56SChangpeng Liu };
683632c8d56SChangpeng Liu 
684632c8d56SChangpeng Liu static void
685632c8d56SChangpeng Liu nvme_ctrlr_set_intel_support_log_pages_done(void *arg, const struct spdk_nvme_cpl *cpl)
686632c8d56SChangpeng Liu {
687632c8d56SChangpeng Liu 	struct intel_log_pages_ctx *ctx = arg;
688632c8d56SChangpeng Liu 	struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
689632c8d56SChangpeng Liu 
690632c8d56SChangpeng Liu 	if (!spdk_nvme_cpl_is_error(cpl)) {
691632c8d56SChangpeng Liu 		nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, &ctx->log_page_directory);
692632c8d56SChangpeng Liu 	}
693632c8d56SChangpeng Liu 
694632c8d56SChangpeng Liu 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
695632c8d56SChangpeng Liu 			     ctrlr->opts.admin_timeout_ms);
696632c8d56SChangpeng Liu 	free(ctx);
697632c8d56SChangpeng Liu }
698632c8d56SChangpeng Liu 
6998dd1cd21SBen Walker static int
7008dd1cd21SBen Walker nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
7019945c00cSCunyin Chang {
7029ad05b31SChangpeng Liu 	int rc = 0;
703632c8d56SChangpeng Liu 	struct intel_log_pages_ctx *ctx;
7049945c00cSCunyin Chang 
705632c8d56SChangpeng Liu 	ctx = calloc(1, sizeof(*ctx));
706632c8d56SChangpeng Liu 	if (!ctx) {
707632c8d56SChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
708632c8d56SChangpeng Liu 				     ctrlr->opts.admin_timeout_ms);
7092077fbd7SIgor Konopko 		return 0;
7109945c00cSCunyin Chang 	}
7119945c00cSCunyin Chang 
712632c8d56SChangpeng Liu 	ctx->ctrlr = ctrlr;
713632c8d56SChangpeng Liu 
714632c8d56SChangpeng Liu 	rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
715632c8d56SChangpeng Liu 					      SPDK_NVME_GLOBAL_NS_TAG, &ctx->log_page_directory,
716632c8d56SChangpeng Liu 					      sizeof(struct spdk_nvme_intel_log_page_directory),
717632c8d56SChangpeng Liu 					      0, nvme_ctrlr_set_intel_support_log_pages_done, ctx);
718632c8d56SChangpeng Liu 	if (rc != 0) {
719632c8d56SChangpeng Liu 		free(ctx);
720632c8d56SChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
721632c8d56SChangpeng Liu 				     ctrlr->opts.admin_timeout_ms);
722632c8d56SChangpeng Liu 		return 0;
723632c8d56SChangpeng Liu 	}
724632c8d56SChangpeng Liu 
725632c8d56SChangpeng Liu 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES,
726632c8d56SChangpeng Liu 			     ctrlr->opts.admin_timeout_ms);
727632c8d56SChangpeng Liu 
7289945c00cSCunyin Chang 	return 0;
7299945c00cSCunyin Chang }
7309945c00cSCunyin Chang 
7319ad05b31SChangpeng Liu static int
732a8ee010eSEvgeniy Kochetov nvme_ctrlr_alloc_ana_log_page(struct spdk_nvme_ctrlr *ctrlr)
733a8ee010eSEvgeniy Kochetov {
734a8ee010eSEvgeniy Kochetov 	uint32_t ana_log_page_size;
735a8ee010eSEvgeniy Kochetov 
736a8ee010eSEvgeniy Kochetov 	ana_log_page_size = sizeof(struct spdk_nvme_ana_page) + ctrlr->cdata.nanagrpid *
73784688fdbSBen Walker 			    sizeof(struct spdk_nvme_ana_group_descriptor) + ctrlr->active_ns_count *
738a8ee010eSEvgeniy Kochetov 			    sizeof(uint32_t);
739a8ee010eSEvgeniy Kochetov 
740076d14ceSEvgeniy Kochetov 	/* The number of active namespaces may have changed.
741076d14ceSEvgeniy Kochetov 	 * Check if the ANA log page still fits into the existing buffer.
742076d14ceSEvgeniy Kochetov 	 */
743076d14ceSEvgeniy Kochetov 	if (ana_log_page_size > ctrlr->ana_log_page_size) {
744076d14ceSEvgeniy Kochetov 		void *new_buffer;
745076d14ceSEvgeniy Kochetov 
746076d14ceSEvgeniy Kochetov 		if (ctrlr->ana_log_page) {
747076d14ceSEvgeniy Kochetov 			new_buffer = realloc(ctrlr->ana_log_page, ana_log_page_size);
748076d14ceSEvgeniy Kochetov 		} else {
749076d14ceSEvgeniy Kochetov 			new_buffer = calloc(1, ana_log_page_size);
750076d14ceSEvgeniy Kochetov 		}
751076d14ceSEvgeniy Kochetov 
752076d14ceSEvgeniy Kochetov 		if (!new_buffer) {
753076d14ceSEvgeniy Kochetov 			NVME_CTRLR_ERRLOG(ctrlr, "could not allocate ANA log page buffer, size %u\n",
754076d14ceSEvgeniy Kochetov 					  ana_log_page_size);
755a8ee010eSEvgeniy Kochetov 			return -ENXIO;
756a8ee010eSEvgeniy Kochetov 		}
757a8ee010eSEvgeniy Kochetov 
758076d14ceSEvgeniy Kochetov 		ctrlr->ana_log_page = new_buffer;
759076d14ceSEvgeniy Kochetov 		if (ctrlr->copied_ana_desc) {
760076d14ceSEvgeniy Kochetov 			new_buffer = realloc(ctrlr->copied_ana_desc, ana_log_page_size);
761076d14ceSEvgeniy Kochetov 		} else {
762076d14ceSEvgeniy Kochetov 			new_buffer = calloc(1, ana_log_page_size);
763076d14ceSEvgeniy Kochetov 		}
764076d14ceSEvgeniy Kochetov 
765076d14ceSEvgeniy Kochetov 		if (!new_buffer) {
766076d14ceSEvgeniy Kochetov 			NVME_CTRLR_ERRLOG(ctrlr, "could not allocate a buffer to parse ANA descriptor, size %u\n",
767076d14ceSEvgeniy Kochetov 					  ana_log_page_size);
768a8ee010eSEvgeniy Kochetov 			return -ENOMEM;
769a8ee010eSEvgeniy Kochetov 		}
770a8ee010eSEvgeniy Kochetov 
771076d14ceSEvgeniy Kochetov 		ctrlr->copied_ana_desc = new_buffer;
772a8ee010eSEvgeniy Kochetov 		ctrlr->ana_log_page_size = ana_log_page_size;
773076d14ceSEvgeniy Kochetov 	}
774a8ee010eSEvgeniy Kochetov 
775a8ee010eSEvgeniy Kochetov 	return 0;
776a8ee010eSEvgeniy Kochetov }
777a8ee010eSEvgeniy Kochetov 
778a8ee010eSEvgeniy Kochetov static int
779a0befabdSShuhei Matsumoto nvme_ctrlr_update_ana_log_page(struct spdk_nvme_ctrlr *ctrlr)
780a0befabdSShuhei Matsumoto {
781a0befabdSShuhei Matsumoto 	struct nvme_completion_poll_status *status;
782a0befabdSShuhei Matsumoto 	int rc;
783a0befabdSShuhei Matsumoto 
784076d14ceSEvgeniy Kochetov 	rc = nvme_ctrlr_alloc_ana_log_page(ctrlr);
785076d14ceSEvgeniy Kochetov 	if (rc != 0) {
786076d14ceSEvgeniy Kochetov 		return rc;
787076d14ceSEvgeniy Kochetov 	}
788076d14ceSEvgeniy Kochetov 
789a0befabdSShuhei Matsumoto 	status = calloc(1, sizeof(*status));
790a0befabdSShuhei Matsumoto 	if (status == NULL) {
79101f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
792a0befabdSShuhei Matsumoto 		return -ENOMEM;
793a0befabdSShuhei Matsumoto 	}
794a0befabdSShuhei Matsumoto 
795a0befabdSShuhei Matsumoto 	rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS,
796a0befabdSShuhei Matsumoto 					      SPDK_NVME_GLOBAL_NS_TAG, ctrlr->ana_log_page,
797a0befabdSShuhei Matsumoto 					      ctrlr->ana_log_page_size, 0,
798a0befabdSShuhei Matsumoto 					      nvme_completion_poll_cb, status);
799a0befabdSShuhei Matsumoto 	if (rc != 0) {
800a0befabdSShuhei Matsumoto 		free(status);
801a0befabdSShuhei Matsumoto 		return rc;
802a0befabdSShuhei Matsumoto 	}
803a0befabdSShuhei Matsumoto 
804a0befabdSShuhei Matsumoto 	if (nvme_wait_for_completion_robust_lock_timeout(ctrlr->adminq, status, &ctrlr->ctrlr_lock,
805a0befabdSShuhei Matsumoto 			ctrlr->opts.admin_timeout_ms * 1000)) {
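		/* On timeout, ownership of the status passes to the completion
		 * callback, which frees it when the command eventually completes,
		 * so it must only be freed here for non-timeout failures.
		 */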
806a0befabdSShuhei Matsumoto 		if (!status->timed_out) {
807a0befabdSShuhei Matsumoto 			free(status);
808a0befabdSShuhei Matsumoto 		}
809a0befabdSShuhei Matsumoto 		return -EIO;
810a0befabdSShuhei Matsumoto 	}
811a0befabdSShuhei Matsumoto 
812a0befabdSShuhei Matsumoto 	free(status);
813a0befabdSShuhei Matsumoto 	return 0;
814a0befabdSShuhei Matsumoto }
815a0befabdSShuhei Matsumoto 
816a0befabdSShuhei Matsumoto static int
817cc2b6962SShuhei Matsumoto nvme_ctrlr_update_ns_ana_states(const struct spdk_nvme_ana_group_descriptor *desc,
818cc2b6962SShuhei Matsumoto 				void *cb_arg)
819cc2b6962SShuhei Matsumoto {
820cc2b6962SShuhei Matsumoto 	struct spdk_nvme_ctrlr *ctrlr = cb_arg;
821cc2b6962SShuhei Matsumoto 	struct spdk_nvme_ns *ns;
822cc2b6962SShuhei Matsumoto 	uint32_t i, nsid;
823cc2b6962SShuhei Matsumoto 
824cc2b6962SShuhei Matsumoto 	for (i = 0; i < desc->num_of_nsid; i++) {
825cc2b6962SShuhei Matsumoto 		nsid = desc->nsid[i];
826cc2b6962SShuhei Matsumoto 		if (nsid == 0 || nsid > ctrlr->cdata.nn) {
827cc2b6962SShuhei Matsumoto 			continue;
828cc2b6962SShuhei Matsumoto 		}
829cc2b6962SShuhei Matsumoto 
830f0e24858SEvgeniy Kochetov 		ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
831f0e24858SEvgeniy Kochetov 		assert(ns != NULL);
832cc2b6962SShuhei Matsumoto 
833cc2b6962SShuhei Matsumoto 		ns->ana_group_id = desc->ana_group_id;
834cc2b6962SShuhei Matsumoto 		ns->ana_state = desc->ana_state;
835cc2b6962SShuhei Matsumoto 	}
836cc2b6962SShuhei Matsumoto 
837cc2b6962SShuhei Matsumoto 	return 0;
838cc2b6962SShuhei Matsumoto }
839cc2b6962SShuhei Matsumoto 
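/*
 * A minimal sketch of the log page layout this parser assumes:
 *
 *     struct spdk_nvme_ana_page header;
 *     for each of header.num_ana_group_desc groups:
 *             struct spdk_nvme_ana_group_descriptor desc;
 *             uint32_t nsid[desc.num_of_nsid];
 *
 * The descriptors are packed back to back, so on each iteration the remaining
 * bytes are memcpy'd into the copied_ana_desc scratch buffer and the first
 * descriptor of that copy is handed to cb_fn with natural alignment.
 */
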
8403befb518SShuhei Matsumoto int
8413befb518SShuhei Matsumoto nvme_ctrlr_parse_ana_log_page(struct spdk_nvme_ctrlr *ctrlr,
8423befb518SShuhei Matsumoto 			      spdk_nvme_parse_ana_log_page_cb cb_fn, void *cb_arg)
8433befb518SShuhei Matsumoto {
844a066f0c3SShuhei Matsumoto 	struct spdk_nvme_ana_group_descriptor *copied_desc;
845a066f0c3SShuhei Matsumoto 	uint8_t *orig_desc;
846a066f0c3SShuhei Matsumoto 	uint32_t i, desc_size, copy_len;
8473befb518SShuhei Matsumoto 	int rc = 0;
8483befb518SShuhei Matsumoto 
8493befb518SShuhei Matsumoto 	if (ctrlr->ana_log_page == NULL) {
8503befb518SShuhei Matsumoto 		return -EINVAL;
8513befb518SShuhei Matsumoto 	}
8523befb518SShuhei Matsumoto 
853a066f0c3SShuhei Matsumoto 	copied_desc = ctrlr->copied_ana_desc;
854a066f0c3SShuhei Matsumoto 
855a066f0c3SShuhei Matsumoto 	orig_desc = (uint8_t *)ctrlr->ana_log_page + sizeof(struct spdk_nvme_ana_page);
856a066f0c3SShuhei Matsumoto 	copy_len = ctrlr->ana_log_page_size - sizeof(struct spdk_nvme_ana_page);
8573befb518SShuhei Matsumoto 
8583befb518SShuhei Matsumoto 	for (i = 0; i < ctrlr->ana_log_page->num_ana_group_desc; i++) {
859a066f0c3SShuhei Matsumoto 		memcpy(copied_desc, orig_desc, copy_len);
860a066f0c3SShuhei Matsumoto 
861a066f0c3SShuhei Matsumoto 		rc = cb_fn(copied_desc, cb_arg);
8623befb518SShuhei Matsumoto 		if (rc != 0) {
8633befb518SShuhei Matsumoto 			break;
8643befb518SShuhei Matsumoto 		}
865a066f0c3SShuhei Matsumoto 
866a066f0c3SShuhei Matsumoto 		desc_size = sizeof(struct spdk_nvme_ana_group_descriptor) +
867a066f0c3SShuhei Matsumoto 			    copied_desc->num_of_nsid * sizeof(uint32_t);
868a066f0c3SShuhei Matsumoto 		orig_desc += desc_size;
869a066f0c3SShuhei Matsumoto 		copy_len -= desc_size;
8703befb518SShuhei Matsumoto 	}
8713befb518SShuhei Matsumoto 
8723befb518SShuhei Matsumoto 	return rc;
8733befb518SShuhei Matsumoto }
8743befb518SShuhei Matsumoto 
875a0befabdSShuhei Matsumoto static int
8766ce73aa6SDaniel Verkamp nvme_ctrlr_set_supported_log_pages(struct spdk_nvme_ctrlr *ctrlr)
8779945c00cSCunyin Chang {
8789ad05b31SChangpeng Liu 	int	rc = 0;
8799ad05b31SChangpeng Liu 
88097601bb3SDaniel Verkamp 	memset(ctrlr->log_page_supported, 0, sizeof(ctrlr->log_page_supported));
88197601bb3SDaniel Verkamp 	/* Mandatory pages */
882ad35d6cdSDaniel Verkamp 	ctrlr->log_page_supported[SPDK_NVME_LOG_ERROR] = true;
883ad35d6cdSDaniel Verkamp 	ctrlr->log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] = true;
884ad35d6cdSDaniel Verkamp 	ctrlr->log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] = true;
8859945c00cSCunyin Chang 	if (ctrlr->cdata.lpa.celp) {
886ad35d6cdSDaniel Verkamp 		ctrlr->log_page_supported[SPDK_NVME_LOG_COMMAND_EFFECTS_LOG] = true;
8879945c00cSCunyin Chang 	}
888632c8d56SChangpeng Liu 
8896c6efee8SShuhei Matsumoto 	if (ctrlr->cdata.cmic.ana_reporting) {
890e0715c2aSShuhei Matsumoto 		ctrlr->log_page_supported[SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS] = true;
891e0715c2aSShuhei Matsumoto 		if (!ctrlr->opts.disable_read_ana_log_page) {
892673c8a65SChangpeng Liu 			rc = nvme_ctrlr_update_ana_log_page(ctrlr);
893cc2b6962SShuhei Matsumoto 			if (rc == 0) {
894cc2b6962SShuhei Matsumoto 				nvme_ctrlr_parse_ana_log_page(ctrlr, nvme_ctrlr_update_ns_ana_states,
895cc2b6962SShuhei Matsumoto 							      ctrlr);
896cc2b6962SShuhei Matsumoto 			}
8976c6efee8SShuhei Matsumoto 		}
898e0715c2aSShuhei Matsumoto 	}
8999ad05b31SChangpeng Liu 
900b4d406b7SAnkit Kumar 	if (ctrlr->cdata.ctratt.bits.fdps) {
901cc7736c9SAnkit Kumar 		ctrlr->log_page_supported[SPDK_NVME_LOG_FDP_CONFIGURATIONS] = true;
902cc7736c9SAnkit Kumar 		ctrlr->log_page_supported[SPDK_NVME_LOG_RECLAIM_UNIT_HANDLE_USAGE] = true;
903cc7736c9SAnkit Kumar 		ctrlr->log_page_supported[SPDK_NVME_LOG_FDP_STATISTICS] = true;
904cc7736c9SAnkit Kumar 		ctrlr->log_page_supported[SPDK_NVME_LOG_FDP_EVENTS] = true;
905cc7736c9SAnkit Kumar 	}
906cc7736c9SAnkit Kumar 
90705dce1eeSJim Harris 	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL &&
90805dce1eeSJim Harris 	    ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE &&
90905dce1eeSJim Harris 	    !(ctrlr->quirks & NVME_INTEL_QUIRK_NO_LOG_PAGES)) {
910632c8d56SChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES,
911632c8d56SChangpeng Liu 				     ctrlr->opts.admin_timeout_ms);
912632c8d56SChangpeng Liu 
913632c8d56SChangpeng Liu 	} else {
914632c8d56SChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
915632c8d56SChangpeng Liu 				     ctrlr->opts.admin_timeout_ms);
916632c8d56SChangpeng Liu 
917632c8d56SChangpeng Liu 	}
918632c8d56SChangpeng Liu 
9199ad05b31SChangpeng Liu 	return rc;
9209945c00cSCunyin Chang }
9219945c00cSCunyin Chang 
92216eee6e2SCunyin Chang static void
9236ce73aa6SDaniel Verkamp nvme_ctrlr_set_intel_supported_features(struct spdk_nvme_ctrlr *ctrlr)
92416eee6e2SCunyin Chang {
9258332f9e4SDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_MAX_LBA] = true;
9268332f9e4SDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_NATIVE_MAX_LBA] = true;
9278332f9e4SDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_POWER_GOVERNOR_SETTING] = true;
9288332f9e4SDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_SMBUS_ADDRESS] = true;
9298332f9e4SDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LED_PATTERN] = true;
9308332f9e4SDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_RESET_TIMED_WORKLOAD_COUNTERS] = true;
9318332f9e4SDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING] = true;
93216eee6e2SCunyin Chang }
93316eee6e2SCunyin Chang 
93416eee6e2SCunyin Chang static void
935acb9849cSChangpeng Liu nvme_ctrlr_set_arbitration_feature(struct spdk_nvme_ctrlr *ctrlr)
936acb9849cSChangpeng Liu {
937acb9849cSChangpeng Liu 	uint32_t cdw11;
9388818ace2SAlexey Marchuk 	struct nvme_completion_poll_status *status;
939acb9849cSChangpeng Liu 
940acb9849cSChangpeng Liu 	if (ctrlr->opts.arbitration_burst == 0) {
941acb9849cSChangpeng Liu 		return;
942acb9849cSChangpeng Liu 	}
943acb9849cSChangpeng Liu 
944acb9849cSChangpeng Liu 	if (ctrlr->opts.arbitration_burst > 7) {
94501f45ecdSGangCao 		NVME_CTRLR_WARNLOG(ctrlr, "Valid arbitration burst values range from 0 to 7\n");
946acb9849cSChangpeng Liu 		return;
947acb9849cSChangpeng Liu 	}
948acb9849cSChangpeng Liu 
94924d61956SAlexey Marchuk 	status = calloc(1, sizeof(*status));
9508818ace2SAlexey Marchuk 	if (!status) {
95101f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
9528818ace2SAlexey Marchuk 		return;
9538818ace2SAlexey Marchuk 	}
9548818ace2SAlexey Marchuk 
955acb9849cSChangpeng Liu 	cdw11 = ctrlr->opts.arbitration_burst;
956acb9849cSChangpeng Liu 
957acb9849cSChangpeng Liu 	if (spdk_nvme_ctrlr_get_flags(ctrlr) & SPDK_NVME_CTRLR_WRR_SUPPORTED) {
958acb9849cSChangpeng Liu 		cdw11 |= (uint32_t)ctrlr->opts.low_priority_weight << 8;
959acb9849cSChangpeng Liu 		cdw11 |= (uint32_t)ctrlr->opts.medium_priority_weight << 16;
960acb9849cSChangpeng Liu 		cdw11 |= (uint32_t)ctrlr->opts.high_priority_weight << 24;
961acb9849cSChangpeng Liu 	}
962acb9849cSChangpeng Liu 
963acb9849cSChangpeng Liu 	if (spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ARBITRATION,
964acb9849cSChangpeng Liu 					    cdw11, 0, NULL, 0,
9658818ace2SAlexey Marchuk 					    nvme_completion_poll_cb, status) < 0) {
96601f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Set arbitration feature failed\n");
9678818ace2SAlexey Marchuk 		free(status);
968acb9849cSChangpeng Liu 		return;
969acb9849cSChangpeng Liu 	}
970acb9849cSChangpeng Liu 
9711a9c19a9SSeth Howell 	if (nvme_wait_for_completion_timeout(ctrlr->adminq, status,
972b3bb3a1bSSeth Howell 					     ctrlr->opts.admin_timeout_ms * 1000)) {
97301f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Timeout to set arbitration feature\n");
974acb9849cSChangpeng Liu 	}
9758818ace2SAlexey Marchuk 
9768818ace2SAlexey Marchuk 	if (!status->timed_out) {
9778818ace2SAlexey Marchuk 		free(status);
9788818ace2SAlexey Marchuk 	}
979acb9849cSChangpeng Liu }
980acb9849cSChangpeng Liu 
981acb9849cSChangpeng Liu static void
9826ce73aa6SDaniel Verkamp nvme_ctrlr_set_supported_features(struct spdk_nvme_ctrlr *ctrlr)
98316eee6e2SCunyin Chang {
98416eee6e2SCunyin Chang 	memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported));
98516eee6e2SCunyin Chang 	/* Mandatory features */
986ad35d6cdSDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_FEAT_ARBITRATION] = true;
987ad35d6cdSDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_FEAT_POWER_MANAGEMENT] = true;
988ad35d6cdSDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD] = true;
989ad35d6cdSDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_FEAT_ERROR_RECOVERY] = true;
990ad35d6cdSDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_FEAT_NUMBER_OF_QUEUES] = true;
991ad35d6cdSDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_COALESCING] = true;
992ad35d6cdSDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION] = true;
993ad35d6cdSDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_FEAT_WRITE_ATOMICITY] = true;
994ad35d6cdSDaniel Verkamp 	ctrlr->feature_supported[SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION] = true;
99516eee6e2SCunyin Chang 	/* Optional features */
99616eee6e2SCunyin Chang 	if (ctrlr->cdata.vwc.present) {
997ad35d6cdSDaniel Verkamp 		ctrlr->feature_supported[SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE] = true;
99816eee6e2SCunyin Chang 	}
99916eee6e2SCunyin Chang 	if (ctrlr->cdata.apsta.supported) {
1000ad35d6cdSDaniel Verkamp 		ctrlr->feature_supported[SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION] = true;
100116eee6e2SCunyin Chang 	}
100216eee6e2SCunyin Chang 	if (ctrlr->cdata.hmpre) {
1003ad35d6cdSDaniel Verkamp 		ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_MEM_BUFFER] = true;
100416eee6e2SCunyin Chang 	}
10058cb09df6SDaniel Verkamp 	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) {
100616eee6e2SCunyin Chang 		nvme_ctrlr_set_intel_supported_features(ctrlr);
100716eee6e2SCunyin Chang 	}
1008acb9849cSChangpeng Liu 
1009acb9849cSChangpeng Liu 	nvme_ctrlr_set_arbitration_feature(ctrlr);
101016eee6e2SCunyin Chang }
101116eee6e2SCunyin Chang 
101260ce1414SShuhei Matsumoto static void
101360ce1414SShuhei Matsumoto nvme_ctrlr_set_host_feature_done(void *arg, const struct spdk_nvme_cpl *cpl)
101460ce1414SShuhei Matsumoto {
101560ce1414SShuhei Matsumoto 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
101660ce1414SShuhei Matsumoto 
101760ce1414SShuhei Matsumoto 	spdk_free(ctrlr->tmp_ptr);
101860ce1414SShuhei Matsumoto 	ctrlr->tmp_ptr = NULL;
101960ce1414SShuhei Matsumoto 
102060ce1414SShuhei Matsumoto 	if (spdk_nvme_cpl_is_error(cpl)) {
102160ce1414SShuhei Matsumoto 		NVME_CTRLR_ERRLOG(ctrlr, "Set host behavior support feature failed: SC %x SCT %x\n",
102260ce1414SShuhei Matsumoto 				  cpl->status.sc, cpl->status.sct);
102360ce1414SShuhei Matsumoto 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
102460ce1414SShuhei Matsumoto 		return;
102560ce1414SShuhei Matsumoto 	}
102660ce1414SShuhei Matsumoto 
102760ce1414SShuhei Matsumoto 	ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT] = true;
102860ce1414SShuhei Matsumoto 
102960ce1414SShuhei Matsumoto 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
103060ce1414SShuhei Matsumoto 			     ctrlr->opts.admin_timeout_ms);
103160ce1414SShuhei Matsumoto }
103260ce1414SShuhei Matsumoto 
103360ce1414SShuhei Matsumoto /* We do not want to add any more synchronous operations here.
103460ce1414SShuhei Matsumoto  * Instead, the Host Behavior Support feature is set asynchronously across different states.
103560ce1414SShuhei Matsumoto  */
103660ce1414SShuhei Matsumoto static int
103760ce1414SShuhei Matsumoto nvme_ctrlr_set_host_feature(struct spdk_nvme_ctrlr *ctrlr)
103860ce1414SShuhei Matsumoto {
103960ce1414SShuhei Matsumoto 	struct spdk_nvme_host_behavior *host;
104060ce1414SShuhei Matsumoto 	int rc;
104160ce1414SShuhei Matsumoto 
104260ce1414SShuhei Matsumoto 	if (!ctrlr->cdata.ctratt.bits.elbas) {
104360ce1414SShuhei Matsumoto 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
104460ce1414SShuhei Matsumoto 				     ctrlr->opts.admin_timeout_ms);
104560ce1414SShuhei Matsumoto 		return 0;
104660ce1414SShuhei Matsumoto 	}
104760ce1414SShuhei Matsumoto 
104860ce1414SShuhei Matsumoto 	ctrlr->tmp_ptr = spdk_dma_zmalloc(sizeof(struct spdk_nvme_host_behavior), 4096, NULL);
104960ce1414SShuhei Matsumoto 	if (!ctrlr->tmp_ptr) {
105060ce1414SShuhei Matsumoto 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate host behavior support data\n");
105160ce1414SShuhei Matsumoto 		rc = -ENOMEM;
105260ce1414SShuhei Matsumoto 		goto error;
105360ce1414SShuhei Matsumoto 	}
105460ce1414SShuhei Matsumoto 
105560ce1414SShuhei Matsumoto 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_HOST_FEATURE,
105660ce1414SShuhei Matsumoto 			     ctrlr->opts.admin_timeout_ms);
105760ce1414SShuhei Matsumoto 
105860ce1414SShuhei Matsumoto 	host = ctrlr->tmp_ptr;
105960ce1414SShuhei Matsumoto 
106060ce1414SShuhei Matsumoto 	host->lbafee = 1;
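	/*
	 * LBAFEE opts the host in to the extended LBA formats advertised via
	 * CTRATT.ELBAS (e.g. formats carrying 64-bit guard protection
	 * information); without it the controller may reject such formats.
	 */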
106160ce1414SShuhei Matsumoto 
106260ce1414SShuhei Matsumoto 	rc = spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT,
106360ce1414SShuhei Matsumoto 					     0, 0, host, sizeof(struct spdk_nvme_host_behavior),
106460ce1414SShuhei Matsumoto 					     nvme_ctrlr_set_host_feature_done, ctrlr);
106560ce1414SShuhei Matsumoto 	if (rc != 0) {
106660ce1414SShuhei Matsumoto 		NVME_CTRLR_ERRLOG(ctrlr, "Set host behavior support feature failed: %d\n", rc);
106760ce1414SShuhei Matsumoto 		goto error;
106860ce1414SShuhei Matsumoto 	}
106960ce1414SShuhei Matsumoto 
107060ce1414SShuhei Matsumoto 	return 0;
107160ce1414SShuhei Matsumoto 
107260ce1414SShuhei Matsumoto error:
107360ce1414SShuhei Matsumoto 	spdk_free(ctrlr->tmp_ptr);
107460ce1414SShuhei Matsumoto 	ctrlr->tmp_ptr = NULL;
107560ce1414SShuhei Matsumoto 
107660ce1414SShuhei Matsumoto 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
107760ce1414SShuhei Matsumoto 	return rc;
107860ce1414SShuhei Matsumoto }
107960ce1414SShuhei Matsumoto 
10807d3771f9SSeth Howell bool
10817d3771f9SSeth Howell spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
10827d3771f9SSeth Howell {
10837d3771f9SSeth Howell 	return ctrlr->is_failed;
10847d3771f9SSeth Howell }
10857d3771f9SSeth Howell 
1086e8c63cddSCunyin Chang void
1087e8c63cddSCunyin Chang nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
10881010fb3aSDaniel Verkamp {
10895f3e922aSCunyin Chang 	/*
10905f3e922aSCunyin Chang 	 * Set the flag here and leave the actual failing of the qpairs to
10915f3e922aSCunyin Chang 	 * spdk_nvme_qpair_process_completions().
10925f3e922aSCunyin Chang 	 */
1093e8c63cddSCunyin Chang 	if (hot_remove) {
1094e8c63cddSCunyin Chang 		ctrlr->is_removed = true;
1095e8c63cddSCunyin Chang 	}
1096974c70e6SShuhei Matsumoto 
1097974c70e6SShuhei Matsumoto 	if (ctrlr->is_failed) {
109801f45ecdSGangCao 		NVME_CTRLR_NOTICELOG(ctrlr, "already in failed state\n");
1099974c70e6SShuhei Matsumoto 		return;
1100974c70e6SShuhei Matsumoto 	}
1101974c70e6SShuhei Matsumoto 
1102df7c2a22SShuhei Matsumoto 	if (ctrlr->is_disconnecting) {
1103df7c2a22SShuhei Matsumoto 		NVME_CTRLR_DEBUGLOG(ctrlr, "already disconnecting\n");
1104df7c2a22SShuhei Matsumoto 		return;
1105df7c2a22SShuhei Matsumoto 	}
1106df7c2a22SShuhei Matsumoto 
11071010fb3aSDaniel Verkamp 	ctrlr->is_failed = true;
1108a58416abSJim Harris 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
1109300583e2SSeth Howell 	nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
111001f45ecdSGangCao 	NVME_CTRLR_ERRLOG(ctrlr, "in failed state.\n");
1111db3fda2eSCunyin Chang }
11121010fb3aSDaniel Verkamp 
11137d3771f9SSeth Howell /**
11147d3771f9SSeth Howell  * This public API function will try to take the controller lock.
11157d3771f9SSeth Howell  * Any private functions being called from a thread already holding
11167d3771f9SSeth Howell  * the ctrlr lock should call nvme_ctrlr_fail directly.
11177d3771f9SSeth Howell  */
11187d3771f9SSeth Howell void
11197d3771f9SSeth Howell spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
11207d3771f9SSeth Howell {
1121e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
11227d3771f9SSeth Howell 	nvme_ctrlr_fail(ctrlr, false);
1123e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
11247d3771f9SSeth Howell }
11257d3771f9SSeth Howell 
11261010fb3aSDaniel Verkamp static void
1127b6ecc372SKonrad Sztyber nvme_ctrlr_shutdown_set_cc_done(void *_ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
11281010fb3aSDaniel Verkamp {
1129b6ecc372SKonrad Sztyber 	struct nvme_ctrlr_detach_ctx *ctx = _ctx;
1130b6ecc372SKonrad Sztyber 	struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
11311010fb3aSDaniel Verkamp 
1132b6ecc372SKonrad Sztyber 	if (spdk_nvme_cpl_is_error(cpl)) {
1133b6ecc372SKonrad Sztyber 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to write CC.SHN\n");
11343806b2e1SShuhei Matsumoto 		ctx->shutdown_complete = true;
1135179f122cSDaniel Verkamp 		return;
1136179f122cSDaniel Verkamp 	}
1137179f122cSDaniel Verkamp 
1138c1f60541SKonrad Sztyber 	if (ctrlr->opts.no_shn_notification) {
1139c1f60541SKonrad Sztyber 		ctx->shutdown_complete = true;
1140c1f60541SKonrad Sztyber 		return;
1141c1f60541SKonrad Sztyber 	}
1142c1f60541SKonrad Sztyber 
11431010fb3aSDaniel Verkamp 	/*
1144acb9d248SDaniel Verkamp 	 * The NVMe specification defines RTD3E as the time from
1145acb9d248SDaniel Verkamp 	 *  setting SHN = 1 until the controller sets SHST = 10b.
1146ffee381dSDaniel Verkamp 	 * If the device doesn't report RTD3 entry latency, or if it
1147ffee381dSDaniel Verkamp 	 *  reports RTD3 entry latency less than 10 seconds, pick
1148ffee381dSDaniel Verkamp 	 *  10 seconds as a reasonable amount of time to
11491010fb3aSDaniel Verkamp 	 *  wait before proceeding.
11501010fb3aSDaniel Verkamp 	 */
115101f45ecdSGangCao 	NVME_CTRLR_DEBUGLOG(ctrlr, "RTD3E = %" PRIu32 " us\n", ctrlr->cdata.rtd3e);
1152257fcb73SShuhei Matsumoto 	ctx->shutdown_timeout_ms = SPDK_CEIL_DIV(ctrlr->cdata.rtd3e, 1000);
1153257fcb73SShuhei Matsumoto 	ctx->shutdown_timeout_ms = spdk_max(ctx->shutdown_timeout_ms, 10000);
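	/* Worked example: RTD3E = 8,000,000 us -> ceil(8000000 / 1000) = 8000 ms,
	 * which is then raised to the 10000 ms floor; RTD3E = 30,000,000 us
	 * stays at 30000 ms.
	 */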
115401f45ecdSGangCao 	NVME_CTRLR_DEBUGLOG(ctrlr, "shutdown timeout = %" PRIu32 " ms\n", ctx->shutdown_timeout_ms);
1155acb9d248SDaniel Verkamp 
1156257fcb73SShuhei Matsumoto 	ctx->shutdown_start_tsc = spdk_get_ticks();
1157b6ecc372SKonrad Sztyber 	ctx->state = NVME_CTRLR_DETACH_CHECK_CSTS;
1158b6ecc372SKonrad Sztyber }
1159b6ecc372SKonrad Sztyber 
1160b6ecc372SKonrad Sztyber static void
1161b6ecc372SKonrad Sztyber nvme_ctrlr_shutdown_get_cc_done(void *_ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
1162b6ecc372SKonrad Sztyber {
1163b6ecc372SKonrad Sztyber 	struct nvme_ctrlr_detach_ctx *ctx = _ctx;
1164b6ecc372SKonrad Sztyber 	struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
1165b6ecc372SKonrad Sztyber 	union spdk_nvme_cc_register cc;
1166b6ecc372SKonrad Sztyber 	int rc;
1167b6ecc372SKonrad Sztyber 
1168b6ecc372SKonrad Sztyber 	if (spdk_nvme_cpl_is_error(cpl)) {
1169b6ecc372SKonrad Sztyber 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
1170b6ecc372SKonrad Sztyber 		ctx->shutdown_complete = true;
1171b6ecc372SKonrad Sztyber 		return;
1172b6ecc372SKonrad Sztyber 	}
1173b6ecc372SKonrad Sztyber 
1174b6ecc372SKonrad Sztyber 	assert(value <= UINT32_MAX);
1175b6ecc372SKonrad Sztyber 	cc.raw = (uint32_t)value;
1176c1f60541SKonrad Sztyber 
1177c1f60541SKonrad Sztyber 	if (ctrlr->opts.no_shn_notification) {
1178c1f60541SKonrad Sztyber 		NVME_CTRLR_INFOLOG(ctrlr, "Disable SSD without shutdown notification\n");
1179c1f60541SKonrad Sztyber 		if (cc.bits.en == 0) {
1180c1f60541SKonrad Sztyber 			ctx->shutdown_complete = true;
1181c1f60541SKonrad Sztyber 			return;
1182c1f60541SKonrad Sztyber 		}
1183c1f60541SKonrad Sztyber 
1184c1f60541SKonrad Sztyber 		cc.bits.en = 0;
1185c1f60541SKonrad Sztyber 	} else {
1186b6ecc372SKonrad Sztyber 		cc.bits.shn = SPDK_NVME_SHN_NORMAL;
1187c1f60541SKonrad Sztyber 	}
1188b6ecc372SKonrad Sztyber 
1189b6ecc372SKonrad Sztyber 	rc = nvme_ctrlr_set_cc_async(ctrlr, cc.raw, nvme_ctrlr_shutdown_set_cc_done, ctx);
1190b6ecc372SKonrad Sztyber 	if (rc != 0) {
1191b6ecc372SKonrad Sztyber 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to write CC.SHN\n");
1192b6ecc372SKonrad Sztyber 		ctx->shutdown_complete = true;
1193b6ecc372SKonrad Sztyber 	}
1194b6ecc372SKonrad Sztyber }
1195b6ecc372SKonrad Sztyber 
1196b6ecc372SKonrad Sztyber static void
1197b6ecc372SKonrad Sztyber nvme_ctrlr_shutdown_async(struct spdk_nvme_ctrlr *ctrlr,
1198b6ecc372SKonrad Sztyber 			  struct nvme_ctrlr_detach_ctx *ctx)
1199b6ecc372SKonrad Sztyber {
1200b6ecc372SKonrad Sztyber 	int rc;
1201b6ecc372SKonrad Sztyber 
1202b6ecc372SKonrad Sztyber 	if (ctrlr->is_removed) {
1203b6ecc372SKonrad Sztyber 		ctx->shutdown_complete = true;
1204b6ecc372SKonrad Sztyber 		return;
1205b6ecc372SKonrad Sztyber 	}
1206b6ecc372SKonrad Sztyber 
120797cc1177SShuhei Matsumoto 	if (ctrlr->adminq == NULL ||
120897cc1177SShuhei Matsumoto 	    ctrlr->adminq->transport_failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
120997cc1177SShuhei Matsumoto 		NVME_CTRLR_INFOLOG(ctrlr, "Adminq is not connected.\n");
121097cc1177SShuhei Matsumoto 		ctx->shutdown_complete = true;
121197cc1177SShuhei Matsumoto 		return;
121297cc1177SShuhei Matsumoto 	}
121397cc1177SShuhei Matsumoto 
1214b6ecc372SKonrad Sztyber 	ctx->state = NVME_CTRLR_DETACH_SET_CC;
1215b6ecc372SKonrad Sztyber 	rc = nvme_ctrlr_get_cc_async(ctrlr, nvme_ctrlr_shutdown_get_cc_done, ctx);
1216b6ecc372SKonrad Sztyber 	if (rc != 0) {
1217b6ecc372SKonrad Sztyber 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
1218b6ecc372SKonrad Sztyber 		ctx->shutdown_complete = true;
1219b6ecc372SKonrad Sztyber 	}
1220b6ecc372SKonrad Sztyber }
1221b6ecc372SKonrad Sztyber 
1222b6ecc372SKonrad Sztyber static void
1223b6ecc372SKonrad Sztyber nvme_ctrlr_shutdown_get_csts_done(void *_ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
1224b6ecc372SKonrad Sztyber {
1225b6ecc372SKonrad Sztyber 	struct nvme_ctrlr_detach_ctx *ctx = _ctx;
1226b6ecc372SKonrad Sztyber 
1227b6ecc372SKonrad Sztyber 	if (spdk_nvme_cpl_is_error(cpl)) {
1228b6ecc372SKonrad Sztyber 		NVME_CTRLR_ERRLOG(ctx->ctrlr, "Failed to read the CSTS register\n");
1229b6ecc372SKonrad Sztyber 		ctx->shutdown_complete = true;
1230b6ecc372SKonrad Sztyber 		return;
1231b6ecc372SKonrad Sztyber 	}
1232b6ecc372SKonrad Sztyber 
1233b6ecc372SKonrad Sztyber 	assert(value <= UINT32_MAX);
1234b6ecc372SKonrad Sztyber 	ctx->csts.raw = (uint32_t)value;
1235b6ecc372SKonrad Sztyber 	ctx->state = NVME_CTRLR_DETACH_GET_CSTS_DONE;
1236257fcb73SShuhei Matsumoto }
1237257fcb73SShuhei Matsumoto 
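/*
 * Summary of the detach state machine driven below (derived from the
 * handlers above):
 *
 *   SET_CC --(CC.SHN written)--> CHECK_CSTS --(async CSTS read issued)-->
 *   GET_CSTS --(completion)--> GET_CSTS_DONE --(evaluate CSTS.SHST)-->
 *   done, timeout, or back to CHECK_CSTS
 */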
1238257fcb73SShuhei Matsumoto static int
1239257fcb73SShuhei Matsumoto nvme_ctrlr_shutdown_poll_async(struct spdk_nvme_ctrlr *ctrlr,
1240257fcb73SShuhei Matsumoto 			       struct nvme_ctrlr_detach_ctx *ctx)
1241257fcb73SShuhei Matsumoto {
1242257fcb73SShuhei Matsumoto 	union spdk_nvme_csts_register	csts;
1243257fcb73SShuhei Matsumoto 	uint32_t			ms_waited;
1244257fcb73SShuhei Matsumoto 
1245b6ecc372SKonrad Sztyber 	switch (ctx->state) {
1246b6ecc372SKonrad Sztyber 	case NVME_CTRLR_DETACH_SET_CC:
1247b6ecc372SKonrad Sztyber 	case NVME_CTRLR_DETACH_GET_CSTS:
1248b6ecc372SKonrad Sztyber 		/* We're still waiting for the register operation to complete */
1249b6ecc372SKonrad Sztyber 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
1250b6ecc372SKonrad Sztyber 		return -EAGAIN;
1251257fcb73SShuhei Matsumoto 
1252b6ecc372SKonrad Sztyber 	case NVME_CTRLR_DETACH_CHECK_CSTS:
1253b6ecc372SKonrad Sztyber 		ctx->state = NVME_CTRLR_DETACH_GET_CSTS;
1254b6ecc372SKonrad Sztyber 		if (nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_shutdown_get_csts_done, ctx)) {
1255b6ecc372SKonrad Sztyber 			NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
1256257fcb73SShuhei Matsumoto 			return -EIO;
12571010fb3aSDaniel Verkamp 		}
1258b6ecc372SKonrad Sztyber 		return -EAGAIN;
1259b6ecc372SKonrad Sztyber 
1260b6ecc372SKonrad Sztyber 	case NVME_CTRLR_DETACH_GET_CSTS_DONE:
1261b6ecc372SKonrad Sztyber 		ctx->state = NVME_CTRLR_DETACH_CHECK_CSTS;
1262b6ecc372SKonrad Sztyber 		break;
1263b6ecc372SKonrad Sztyber 
1264b6ecc372SKonrad Sztyber 	default:
1265b6ecc372SKonrad Sztyber 		assert(0 && "Should never happen");
1266b6ecc372SKonrad Sztyber 		return -EINVAL;
1267b6ecc372SKonrad Sztyber 	}
1268b6ecc372SKonrad Sztyber 
1269b6ecc372SKonrad Sztyber 	ms_waited = (spdk_get_ticks() - ctx->shutdown_start_tsc) * 1000 / spdk_get_ticks_hz();
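	/* Tick-to-ms conversion example: with spdk_get_ticks_hz() = 2 GHz,
	 * an elapsed 2,000,000,000 ticks is 2e9 * 1000 / 2e9 = 1000 ms.
	 */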
1270b6ecc372SKonrad Sztyber 	csts.raw = ctx->csts.raw;
1271179f122cSDaniel Verkamp 
1272179f122cSDaniel Verkamp 	if (csts.bits.shst == SPDK_NVME_SHST_COMPLETE) {
127301f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "shutdown complete in %u milliseconds\n", ms_waited);
1274257fcb73SShuhei Matsumoto 		return 0;
1275179f122cSDaniel Verkamp 	}
1276179f122cSDaniel Verkamp 
1277257fcb73SShuhei Matsumoto 	if (ms_waited < ctx->shutdown_timeout_ms) {
1278257fcb73SShuhei Matsumoto 		return -EAGAIN;
1279257fcb73SShuhei Matsumoto 	}
1280179f122cSDaniel Verkamp 
128101f45ecdSGangCao 	NVME_CTRLR_ERRLOG(ctrlr, "did not shutdown within %u milliseconds\n",
128201f45ecdSGangCao 			  ctx->shutdown_timeout_ms);
1283518c8addSJim Harris 	if (ctrlr->quirks & NVME_QUIRK_SHST_COMPLETE) {
128401f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "likely due to shutdown handling in the VMWare emulated NVMe SSD\n");
1285518c8addSJim Harris 	}
1286257fcb73SShuhei Matsumoto 
1287257fcb73SShuhei Matsumoto 	return 0;
12881010fb3aSDaniel Verkamp }
12891010fb3aSDaniel Verkamp 
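/*
 * CAP.TO is reported in units of 500 ms, so e.g. CAP.TO = 20 allows the
 * controller up to 20 * 500 = 10,000 ms to transition CSTS.RDY.
 */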
129038091aadSKonrad Sztyber static inline uint64_t
129138091aadSKonrad Sztyber nvme_ctrlr_get_ready_timeout(struct spdk_nvme_ctrlr *ctrlr)
129238091aadSKonrad Sztyber {
129338091aadSKonrad Sztyber 	return ctrlr->cap.bits.to * 500;
129438091aadSKonrad Sztyber }
129538091aadSKonrad Sztyber 
129673050d51SKonrad Sztyber static void
129773050d51SKonrad Sztyber nvme_ctrlr_set_cc_en_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
129873050d51SKonrad Sztyber {
129973050d51SKonrad Sztyber 	struct spdk_nvme_ctrlr *ctrlr = ctx;
130073050d51SKonrad Sztyber 
130173050d51SKonrad Sztyber 	if (spdk_nvme_cpl_is_error(cpl)) {
130273050d51SKonrad Sztyber 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to set the CC register\n");
130373050d51SKonrad Sztyber 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
130473050d51SKonrad Sztyber 		return;
130573050d51SKonrad Sztyber 	}
130673050d51SKonrad Sztyber 
130773050d51SKonrad Sztyber 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
130873050d51SKonrad Sztyber 			     nvme_ctrlr_get_ready_timeout(ctrlr));
130973050d51SKonrad Sztyber }
131073050d51SKonrad Sztyber 
13111010fb3aSDaniel Verkamp static int
13126ce73aa6SDaniel Verkamp nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
13131010fb3aSDaniel Verkamp {
1314ad35d6cdSDaniel Verkamp 	union spdk_nvme_cc_register	cc;
1315ff386978SDaniel Verkamp 	int				rc;
1316ff386978SDaniel Verkamp 
13171ffec5d5SDaniel Verkamp 	rc = nvme_transport_ctrlr_enable(ctrlr);
1318ff386978SDaniel Verkamp 	if (rc != 0) {
131901f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "transport ctrlr_enable failed\n");
1320ff386978SDaniel Verkamp 		return rc;
1321ff386978SDaniel Verkamp 	}
1322179f122cSDaniel Verkamp 
132373050d51SKonrad Sztyber 	cc.raw = ctrlr->process_init_cc.raw;
132420abbe8aSDaniel Verkamp 	if (cc.bits.en != 0) {
132501f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "called with CC.EN = 1\n");
1326f0b20026SMinfei Huang 		return -EINVAL;
13271010fb3aSDaniel Verkamp 	}
13281010fb3aSDaniel Verkamp 
13291010fb3aSDaniel Verkamp 	cc.bits.en = 1;
13301010fb3aSDaniel Verkamp 	cc.bits.css = 0;
13311010fb3aSDaniel Verkamp 	cc.bits.shn = 0;
13321010fb3aSDaniel Verkamp 	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
13331010fb3aSDaniel Verkamp 	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
13341010fb3aSDaniel Verkamp 
13351010fb3aSDaniel Verkamp 	/* Page size is 2 ^ (12 + mps). */
13362eec131eSDaniel Verkamp 	cc.bits.mps = spdk_u32log2(ctrlr->page_size) - 12;
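	/* e.g., a 4 KiB page size: spdk_u32log2(4096) = 12, so CC.MPS = 0
	 * and the page size is 2 ^ (12 + 0) = 4096 bytes.
	 */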
13371010fb3aSDaniel Verkamp 
1338198a3ad2SNiklas Cassel 	/*
1339198a3ad2SNiklas Cassel 	 * Since NVMe 1.0, a controller should have at least one bit set in CAP.CSS.
1340198a3ad2SNiklas Cassel 	 * A controller that does not have any bit set in CAP.CSS is not spec compliant.
1341198a3ad2SNiklas Cassel 	 * Try to support such a controller regardless.
1342198a3ad2SNiklas Cassel 	 */
134399c8c6d8SBen Walker 	if (ctrlr->cap.bits.css == 0) {
134401f45ecdSGangCao 		NVME_CTRLR_INFOLOG(ctrlr, "Drive reports no command sets supported. Assuming NVM is supported.\n");
134599c8c6d8SBen Walker 		ctrlr->cap.bits.css = SPDK_NVME_CAP_CSS_NVM;
134699c8c6d8SBen Walker 	}
134799c8c6d8SBen Walker 
1348198a3ad2SNiklas Cassel 	/*
1349198a3ad2SNiklas Cassel 	 * If the user did not explicitly request a command set, or supplied a value larger than
1350198a3ad2SNiklas Cassel 	 * what can be saved in CC.CSS, use the most reasonable default.
1351198a3ad2SNiklas Cassel 	 */
1352198a3ad2SNiklas Cassel 	if (ctrlr->opts.command_set >= CHAR_BIT) {
1353198a3ad2SNiklas Cassel 		if (ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS) {
1354198a3ad2SNiklas Cassel 			ctrlr->opts.command_set = SPDK_NVME_CC_CSS_IOCS;
1355198a3ad2SNiklas Cassel 		} else if (ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_NVM) {
1356198a3ad2SNiklas Cassel 			ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
1357198a3ad2SNiklas Cassel 		} else if (ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_NOIO) {
1358f41dbc23SJim Harris 			/* Technically we should respond with CC_CSS_NOIO in
1359f41dbc23SJim Harris 			 * this case, but we use NVM instead to work around
1360f41dbc23SJim Harris 			 * buggy targets and to match Linux driver behavior.
1361f41dbc23SJim Harris 			 */
1362f41dbc23SJim Harris 			ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
1363198a3ad2SNiklas Cassel 		} else {
1364198a3ad2SNiklas Cassel 			/* Invalid supported bits detected, falling back to NVM. */
1365198a3ad2SNiklas Cassel 			ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
1366198a3ad2SNiklas Cassel 		}
1367198a3ad2SNiklas Cassel 	}
1368198a3ad2SNiklas Cassel 
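	/* Worked example: CAP.CSS = 0x41 sets bit 0 (NVM) and bit 6 (IOCS), so an
	 * unspecified command set defaults to CC.CSS = 6 (IOCS), which also passes
	 * the mask check below: (1u << 6) & 0x41 != 0.
	 */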
1369198a3ad2SNiklas Cassel 	/* Verify that the selected command set is supported by the controller. */
137099c8c6d8SBen Walker 	if (!(ctrlr->cap.bits.css & (1u << ctrlr->opts.command_set))) {
137101f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "Requested I/O command set %u but supported mask is 0x%x\n",
137299c8c6d8SBen Walker 				    ctrlr->opts.command_set, ctrlr->cap.bits.css);
137301f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "Falling back to NVM. Assuming NVM is supported.\n");
1374ae6c40a4SSimon A. F. Lund 		ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
137599c8c6d8SBen Walker 	}
137699c8c6d8SBen Walker 
137799c8c6d8SBen Walker 	cc.bits.css = ctrlr->opts.command_set;
137899c8c6d8SBen Walker 
13797991eb19SZiye Yang 	switch (ctrlr->opts.arb_mechanism) {
13807991eb19SZiye Yang 	case SPDK_NVME_CC_AMS_RR:
13817991eb19SZiye Yang 		break;
13827991eb19SZiye Yang 	case SPDK_NVME_CC_AMS_WRR:
138376469b2cSDaniel Verkamp 		if (SPDK_NVME_CAP_AMS_WRR & ctrlr->cap.bits.ams) {
13847991eb19SZiye Yang 			break;
13857991eb19SZiye Yang 		}
13867991eb19SZiye Yang 		return -EINVAL;
13877991eb19SZiye Yang 	case SPDK_NVME_CC_AMS_VS:
138876469b2cSDaniel Verkamp 		if (SPDK_NVME_CAP_AMS_VS & ctrlr->cap.bits.ams) {
13897991eb19SZiye Yang 			break;
13907991eb19SZiye Yang 		}
13917991eb19SZiye Yang 		return -EINVAL;
13927991eb19SZiye Yang 	default:
13937991eb19SZiye Yang 		return -EINVAL;
13947991eb19SZiye Yang 	}
13957991eb19SZiye Yang 
13967991eb19SZiye Yang 	cc.bits.ams = ctrlr->opts.arb_mechanism;
139773050d51SKonrad Sztyber 	ctrlr->process_init_cc.raw = cc.raw;
13987991eb19SZiye Yang 
139973050d51SKonrad Sztyber 	if (nvme_ctrlr_set_cc_async(ctrlr, cc.raw, nvme_ctrlr_set_cc_en_done, ctrlr)) {
140001f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "set_cc() failed\n");
1401179f122cSDaniel Verkamp 		return -EIO;
1402179f122cSDaniel Verkamp 	}
14031010fb3aSDaniel Verkamp 
140420abbe8aSDaniel Verkamp 	return 0;
14051010fb3aSDaniel Verkamp }
14061010fb3aSDaniel Verkamp 
1407bdf98230SDaniel Verkamp static const char *
1408bdf98230SDaniel Verkamp nvme_ctrlr_state_string(enum nvme_ctrlr_state state)
1409bdf98230SDaniel Verkamp {
1410bdf98230SDaniel Verkamp 	switch (state) {
1411951bb3a4SDarek Stojaczyk 	case NVME_CTRLR_STATE_INIT_DELAY:
1412951bb3a4SDarek Stojaczyk 		return "delay init";
1413d6f6ffd2SJim Harris 	case NVME_CTRLR_STATE_CONNECT_ADMINQ:
1414d6f6ffd2SJim Harris 		return "connect adminq";
141537c36ec1SKonrad Sztyber 	case NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ:
141637c36ec1SKonrad Sztyber 		return "wait for connect adminq";
1417df01076fSJim Harris 	case NVME_CTRLR_STATE_READ_VS:
1418df01076fSJim Harris 		return "read vs";
141909acc162SKonrad Sztyber 	case NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS:
142009acc162SKonrad Sztyber 		return "read vs wait for vs";
1421f5ba8a5eSJim Harris 	case NVME_CTRLR_STATE_READ_CAP:
1422f5ba8a5eSJim Harris 		return "read cap";
14239d8251f6SKonrad Sztyber 	case NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP:
14249d8251f6SKonrad Sztyber 		return "read cap wait for cap";
1425a12cf26cSJim Harris 	case NVME_CTRLR_STATE_CHECK_EN:
1426a12cf26cSJim Harris 		return "check en";
1427ccc084f3SKonrad Sztyber 	case NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC:
1428ccc084f3SKonrad Sztyber 		return "check en wait for cc";
1429bdf98230SDaniel Verkamp 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
1430bdf98230SDaniel Verkamp 		return "disable and wait for CSTS.RDY = 1";
14318da3c166SKonrad Sztyber 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
14328da3c166SKonrad Sztyber 		return "disable and wait for CSTS.RDY = 1 reg";
1433fc8d8618SJim Harris 	case NVME_CTRLR_STATE_SET_EN_0:
1434fc8d8618SJim Harris 		return "set CC.EN = 0";
14355f376485SKonrad Sztyber 	case NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC:
14365f376485SKonrad Sztyber 		return "set CC.EN = 0 wait for cc";
1437bdf98230SDaniel Verkamp 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
1438bdf98230SDaniel Verkamp 		return "disable and wait for CSTS.RDY = 0";
14398da3c166SKonrad Sztyber 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS:
14408da3c166SKonrad Sztyber 		return "disable and wait for CSTS.RDY = 0 reg";
14415e5423deSShuhei Matsumoto 	case NVME_CTRLR_STATE_DISABLED:
14425e5423deSShuhei Matsumoto 		return "controller is disabled";
144336a793adSBen Walker 	case NVME_CTRLR_STATE_ENABLE:
144436a793adSBen Walker 		return "enable controller by writing CC.EN = 1";
144573050d51SKonrad Sztyber 	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC:
144673050d51SKonrad Sztyber 		return "enable controller by writing CC.EN = 1 reg";
1447bdf98230SDaniel Verkamp 	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
144836a793adSBen Walker 		return "wait for CSTS.RDY = 1";
14499e216680SKonrad Sztyber 	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
14509e216680SKonrad Sztyber 		return "wait for CSTS.RDY = 1 reg";
14515cd76349SSeth Howell 	case NVME_CTRLR_STATE_RESET_ADMIN_QUEUE:
14525cd76349SSeth Howell 		return "reset admin queue";
1453eb5cb3dbSChangpeng Liu 	case NVME_CTRLR_STATE_IDENTIFY:
1454eb5cb3dbSChangpeng Liu 		return "identify controller";
1455a61aff77SChangpeng Liu 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
1456a61aff77SChangpeng Liu 		return "wait for identify controller";
14577e68d0baSJim Harris 	case NVME_CTRLR_STATE_CONFIGURE_AER:
14587e68d0baSJim Harris 		return "configure AER";
14597e68d0baSJim Harris 	case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
14607e68d0baSJim Harris 		return "wait for configure aer";
14611c083e62SJim Harris 	case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
14621c083e62SJim Harris 		return "set keep alive timeout";
14631c083e62SJim Harris 	case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
14641c083e62SJim Harris 		return "wait for set keep alive timeout";
146564563adaSNiklas Cassel 	case NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC:
146664563adaSNiklas Cassel 		return "identify controller iocs specific";
146764563adaSNiklas Cassel 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC:
146864563adaSNiklas Cassel 		return "wait for identify controller iocs specific";
146938d59d8bSNiklas Cassel 	case NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG:
147038d59d8bSNiklas Cassel 		return "get zns cmd and effects log page";
147138d59d8bSNiklas Cassel 	case NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG:
147238d59d8bSNiklas Cassel 		return "wait for get zns cmd and effects log page";
14738b95dbabSChangpeng Liu 	case NVME_CTRLR_STATE_SET_NUM_QUEUES:
1474eb5cb3dbSChangpeng Liu 		return "set number of queues";
147538a396d9SChangpeng Liu 	case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
147638a396d9SChangpeng Liu 		return "wait for set number of queues";
1477d9ecb572SChangpeng Liu 	case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
1478d9ecb572SChangpeng Liu 		return "identify active ns";
147955e0ec89SJacek Kalwas 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS:
148055e0ec89SJacek Kalwas 		return "wait for identify active ns";
1481d9ecb572SChangpeng Liu 	case NVME_CTRLR_STATE_IDENTIFY_NS:
1482d9ecb572SChangpeng Liu 		return "identify ns";
148392bf76c9SChangpeng Liu 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
148492bf76c9SChangpeng Liu 		return "wait for identify ns";
1485d9ecb572SChangpeng Liu 	case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
1486a2fdc4ddSChangpeng Liu 		return "identify namespace id descriptors";
1487a2fdc4ddSChangpeng Liu 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
1488a2fdc4ddSChangpeng Liu 		return "wait for identify namespace id descriptors";
1489c4d1b7d5SNiklas Cassel 	case NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC:
1490c4d1b7d5SNiklas Cassel 		return "identify ns iocs specific";
1491c4d1b7d5SNiklas Cassel 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC:
1492c4d1b7d5SNiklas Cassel 		return "wait for identify ns iocs specific";
1493eb5cb3dbSChangpeng Liu 	case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
1494eb5cb3dbSChangpeng Liu 		return "set supported log pages";
1495632c8d56SChangpeng Liu 	case NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES:
1496632c8d56SChangpeng Liu 		return "set supported INTEL log pages";
1497632c8d56SChangpeng Liu 	case NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES:
1498632c8d56SChangpeng Liu 		return "wait for supported INTEL log pages";
1499eb5cb3dbSChangpeng Liu 	case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
1500eb5cb3dbSChangpeng Liu 		return "set supported features";
150160ce1414SShuhei Matsumoto 	case NVME_CTRLR_STATE_SET_HOST_FEATURE:
150260ce1414SShuhei Matsumoto 		return "set host behavior support feature";
150360ce1414SShuhei Matsumoto 	case NVME_CTRLR_STATE_WAIT_FOR_SET_HOST_FEATURE:
150460ce1414SShuhei Matsumoto 		return "wait for set host behavior support feature";
1505eb5cb3dbSChangpeng Liu 	case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
1506eb5cb3dbSChangpeng Liu 		return "set doorbell buffer config";
15075a028860SChangpeng Liu 	case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
15085a028860SChangpeng Liu 		return "wait for doorbell buffer config";
1509eb5cb3dbSChangpeng Liu 	case NVME_CTRLR_STATE_SET_HOST_ID:
1510eb5cb3dbSChangpeng Liu 		return "set host ID";
151198b19709SChangpeng Liu 	case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
151298b19709SChangpeng Liu 		return "wait for set host ID";
15133dd0bc9eSEvgeniy Kochetov 	case NVME_CTRLR_STATE_TRANSPORT_READY:
15143dd0bc9eSEvgeniy Kochetov 		return "transport ready";
1515bdf98230SDaniel Verkamp 	case NVME_CTRLR_STATE_READY:
1516bdf98230SDaniel Verkamp 		return "ready";
15171c79fadbSChangpeng Liu 	case NVME_CTRLR_STATE_ERROR:
15181c79fadbSChangpeng Liu 		return "error";
1519af130056SAlexey Marchuk 	case NVME_CTRLR_STATE_DISCONNECTED:
1520af130056SAlexey Marchuk 		return "disconnected";
1521bdf98230SDaniel Verkamp 	}
1522bdf98230SDaniel Verkamp 	return "unknown";
1523bdf98230SDaniel Verkamp }
1524bdf98230SDaniel Verkamp 
152520abbe8aSDaniel Verkamp static void
15262f9c97a4SKonrad Sztyber _nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
15272f9c97a4SKonrad Sztyber 		      uint64_t timeout_in_ms, bool quiet)
15281010fb3aSDaniel Verkamp {
152906746448SSeth Howell 	uint64_t ticks_per_ms, timeout_in_ticks, now_ticks;
153006746448SSeth Howell 
153120abbe8aSDaniel Verkamp 	ctrlr->state = state;
1532d953072dSKonrad Sztyber 	if (timeout_in_ms == NVME_TIMEOUT_KEEP_EXISTING) {
15332f9c97a4SKonrad Sztyber 		if (!quiet) {
1534d953072dSKonrad Sztyber 			NVME_CTRLR_DEBUGLOG(ctrlr, "setting state to %s (keeping existing timeout)\n",
1535d953072dSKonrad Sztyber 					    nvme_ctrlr_state_string(ctrlr->state));
15362f9c97a4SKonrad Sztyber 		}
1537d953072dSKonrad Sztyber 		return;
1538d953072dSKonrad Sztyber 	}
1539d953072dSKonrad Sztyber 
154006746448SSeth Howell 	if (timeout_in_ms == NVME_TIMEOUT_INFINITE) {
154106746448SSeth Howell 		goto inf;
154206746448SSeth Howell 	}
154306746448SSeth Howell 
154406746448SSeth Howell 	ticks_per_ms = spdk_get_ticks_hz() / 1000;
154506746448SSeth Howell 	if (timeout_in_ms > UINT64_MAX / ticks_per_ms) {
154601f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr,
154701f45ecdSGangCao 				  "Specified timeout would cause integer overflow. Defaulting to no timeout.\n");
154806746448SSeth Howell 		goto inf;
154906746448SSeth Howell 	}
155006746448SSeth Howell 
155106746448SSeth Howell 	now_ticks = spdk_get_ticks();
155206746448SSeth Howell 	timeout_in_ticks = timeout_in_ms * ticks_per_ms;
155306746448SSeth Howell 	if (timeout_in_ticks > UINT64_MAX - now_ticks) {
155401f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr,
155501f45ecdSGangCao 				  "Specified timeout would cause integer overflow. Defaulting to no timeout.\n");
155606746448SSeth Howell 		goto inf;
155706746448SSeth Howell 	}
155806746448SSeth Howell 
155906746448SSeth Howell 	ctrlr->state_timeout_tsc = timeout_in_ticks + now_ticks;
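	/* e.g., with a 2 GHz TSC, ticks_per_ms = 2,000,000, so a 15,000 ms
	 * admin timeout arms state_timeout_tsc 30,000,000,000 ticks from now.
	 */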
15602f9c97a4SKonrad Sztyber 	if (!quiet) {
156101f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "setting state to %s (timeout %" PRIu64 " ms)\n",
156242f2f01aSJacek Kalwas 				    nvme_ctrlr_state_string(ctrlr->state), timeout_in_ms);
15632f9c97a4SKonrad Sztyber 	}
156406746448SSeth Howell 	return;
156506746448SSeth Howell inf:
15662f9c97a4SKonrad Sztyber 	if (!quiet) {
156701f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "setting state to %s (no timeout)\n",
1568d57f3db1SDaniel Verkamp 				    nvme_ctrlr_state_string(ctrlr->state));
15692f9c97a4SKonrad Sztyber 	}
157020abbe8aSDaniel Verkamp 	ctrlr->state_timeout_tsc = NVME_TIMEOUT_INFINITE;
15711010fb3aSDaniel Verkamp }
15721010fb3aSDaniel Verkamp 
15737e3a11f9SChangpeng Liu static void
15742f9c97a4SKonrad Sztyber nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
15752f9c97a4SKonrad Sztyber 		     uint64_t timeout_in_ms)
15762f9c97a4SKonrad Sztyber {
15772f9c97a4SKonrad Sztyber 	_nvme_ctrlr_set_state(ctrlr, state, timeout_in_ms, false);
15782f9c97a4SKonrad Sztyber }
15792f9c97a4SKonrad Sztyber 
15802f9c97a4SKonrad Sztyber static void
15812f9c97a4SKonrad Sztyber nvme_ctrlr_set_state_quiet(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
15822f9c97a4SKonrad Sztyber 			   uint64_t timeout_in_ms)
15832f9c97a4SKonrad Sztyber {
15842f9c97a4SKonrad Sztyber 	_nvme_ctrlr_set_state(ctrlr, state, timeout_in_ms, true);
15852f9c97a4SKonrad Sztyber }
15862f9c97a4SKonrad Sztyber 
15872f9c97a4SKonrad Sztyber static void
158864563adaSNiklas Cassel nvme_ctrlr_free_zns_specific_data(struct spdk_nvme_ctrlr *ctrlr)
158964563adaSNiklas Cassel {
159064563adaSNiklas Cassel 	spdk_free(ctrlr->cdata_zns);
159164563adaSNiklas Cassel 	ctrlr->cdata_zns = NULL;
159264563adaSNiklas Cassel }
159364563adaSNiklas Cassel 
159464563adaSNiklas Cassel static void
159564563adaSNiklas Cassel nvme_ctrlr_free_iocs_specific_data(struct spdk_nvme_ctrlr *ctrlr)
159664563adaSNiklas Cassel {
159764563adaSNiklas Cassel 	nvme_ctrlr_free_zns_specific_data(ctrlr);
159864563adaSNiklas Cassel }
159964563adaSNiklas Cassel 
160064563adaSNiklas Cassel static void
16017e3a11f9SChangpeng Liu nvme_ctrlr_free_doorbell_buffer(struct spdk_nvme_ctrlr *ctrlr)
16027e3a11f9SChangpeng Liu {
16037e3a11f9SChangpeng Liu 	if (ctrlr->shadow_doorbell) {
1604f9a6588fSDarek Stojaczyk 		spdk_free(ctrlr->shadow_doorbell);
16057e3a11f9SChangpeng Liu 		ctrlr->shadow_doorbell = NULL;
16067e3a11f9SChangpeng Liu 	}
16077e3a11f9SChangpeng Liu 
16087e3a11f9SChangpeng Liu 	if (ctrlr->eventidx) {
1609f9a6588fSDarek Stojaczyk 		spdk_free(ctrlr->eventidx);
16107e3a11f9SChangpeng Liu 		ctrlr->eventidx = NULL;
16117e3a11f9SChangpeng Liu 	}
16127e3a11f9SChangpeng Liu }
16137e3a11f9SChangpeng Liu 
16145a028860SChangpeng Liu static void
16155a028860SChangpeng Liu nvme_ctrlr_set_doorbell_buffer_config_done(void *arg, const struct spdk_nvme_cpl *cpl)
16165a028860SChangpeng Liu {
16175a028860SChangpeng Liu 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
16185a028860SChangpeng Liu 
16195a028860SChangpeng Liu 	if (spdk_nvme_cpl_is_error(cpl)) {
162001f45ecdSGangCao 		NVME_CTRLR_WARNLOG(ctrlr, "Doorbell buffer config failed\n");
16215a028860SChangpeng Liu 	} else {
162201f45ecdSGangCao 		NVME_CTRLR_INFOLOG(ctrlr, "Doorbell buffer config enabled\n");
16235a028860SChangpeng Liu 	}
16241c083e62SJim Harris 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
16252706cd42SChangpeng Liu 			     ctrlr->opts.admin_timeout_ms);
16265a028860SChangpeng Liu }
16275a028860SChangpeng Liu 
16287e3a11f9SChangpeng Liu static int
16297e3a11f9SChangpeng Liu nvme_ctrlr_set_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr)
16307e3a11f9SChangpeng Liu {
16317c0e9faaSChangpeng Liu 	int rc = 0;
163227c42e31SDarek Stojaczyk 	uint64_t prp1, prp2, len;
16337e3a11f9SChangpeng Liu 
16347c0e9faaSChangpeng Liu 	if (!ctrlr->cdata.oacs.doorbell_buffer_config) {
16351c083e62SJim Harris 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
16362706cd42SChangpeng Liu 				     ctrlr->opts.admin_timeout_ms);
16377c0e9faaSChangpeng Liu 		return 0;
16387c0e9faaSChangpeng Liu 	}
16397c0e9faaSChangpeng Liu 
16407e3a11f9SChangpeng Liu 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
16411c083e62SJim Harris 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
16422706cd42SChangpeng Liu 				     ctrlr->opts.admin_timeout_ms);
16437e3a11f9SChangpeng Liu 		return 0;
16447e3a11f9SChangpeng Liu 	}
16457e3a11f9SChangpeng Liu 
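	/*
	 * Doorbell Buffer Config (NVMe 1.3, admin opcode 7Ch) targets emulated
	 * controllers: PRP1 points at a one-page shadow doorbell buffer and PRP2
	 * at a one-page EventIdx buffer, letting the host elide most MMIO
	 * doorbell writes.
	 */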
16467e3a11f9SChangpeng Liu 	/* only 1 page size for doorbell buffer */
1647f9a6588fSDarek Stojaczyk 	ctrlr->shadow_doorbell = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size,
1648f9a6588fSDarek Stojaczyk 					      NULL, SPDK_ENV_LCORE_ID_ANY,
1649f9a6588fSDarek Stojaczyk 					      SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
16507e3a11f9SChangpeng Liu 	if (ctrlr->shadow_doorbell == NULL) {
16515a028860SChangpeng Liu 		rc = -ENOMEM;
16525a028860SChangpeng Liu 		goto error;
16537e3a11f9SChangpeng Liu 	}
16547e3a11f9SChangpeng Liu 
165527c42e31SDarek Stojaczyk 	len = ctrlr->page_size;
165627c42e31SDarek Stojaczyk 	prp1 = spdk_vtophys(ctrlr->shadow_doorbell, &len);
165727c42e31SDarek Stojaczyk 	if (prp1 == SPDK_VTOPHYS_ERROR || len != ctrlr->page_size) {
165827c42e31SDarek Stojaczyk 		rc = -EFAULT;
165927c42e31SDarek Stojaczyk 		goto error;
166027c42e31SDarek Stojaczyk 	}
166127c42e31SDarek Stojaczyk 
1662f9a6588fSDarek Stojaczyk 	ctrlr->eventidx = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size,
1663f9a6588fSDarek Stojaczyk 				       NULL, SPDK_ENV_LCORE_ID_ANY,
1664f9a6588fSDarek Stojaczyk 				       SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
16657e3a11f9SChangpeng Liu 	if (ctrlr->eventidx == NULL) {
16667c0e9faaSChangpeng Liu 		rc = -ENOMEM;
16677e3a11f9SChangpeng Liu 		goto error;
16687e3a11f9SChangpeng Liu 	}
16697e3a11f9SChangpeng Liu 
167027c42e31SDarek Stojaczyk 	len = ctrlr->page_size;
167127c42e31SDarek Stojaczyk 	prp2 = spdk_vtophys(ctrlr->eventidx, &len);
167227c42e31SDarek Stojaczyk 	if (prp2 == SPDK_VTOPHYS_ERROR || len != ctrlr->page_size) {
167327c42e31SDarek Stojaczyk 		rc = -EFAULT;
167427c42e31SDarek Stojaczyk 		goto error;
167527c42e31SDarek Stojaczyk 	}
167627c42e31SDarek Stojaczyk 
16772706cd42SChangpeng Liu 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
16782706cd42SChangpeng Liu 			     ctrlr->opts.admin_timeout_ms);
16795a028860SChangpeng Liu 
16807e3a11f9SChangpeng Liu 	rc = nvme_ctrlr_cmd_doorbell_buffer_config(ctrlr, prp1, prp2,
16815a028860SChangpeng Liu 			nvme_ctrlr_set_doorbell_buffer_config_done, ctrlr);
16827e3a11f9SChangpeng Liu 	if (rc != 0) {
16837e3a11f9SChangpeng Liu 		goto error;
16847e3a11f9SChangpeng Liu 	}
16857e3a11f9SChangpeng Liu 
16867e3a11f9SChangpeng Liu 	return 0;
16877e3a11f9SChangpeng Liu 
16887e3a11f9SChangpeng Liu error:
16895a028860SChangpeng Liu 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
16907e3a11f9SChangpeng Liu 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
16917c0e9faaSChangpeng Liu 	return rc;
16927e3a11f9SChangpeng Liu }
16937e3a11f9SChangpeng Liu 
16944be6d304SShuhei Matsumoto void
16955322f307SShuhei Matsumoto nvme_ctrlr_abort_queued_aborts(struct spdk_nvme_ctrlr *ctrlr)
16965322f307SShuhei Matsumoto {
16975322f307SShuhei Matsumoto 	struct nvme_request	*req, *tmp;
16985322f307SShuhei Matsumoto 	struct spdk_nvme_cpl	cpl = {};
16995322f307SShuhei Matsumoto 
17005322f307SShuhei Matsumoto 	cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
17015322f307SShuhei Matsumoto 	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
17025322f307SShuhei Matsumoto 
17035322f307SShuhei Matsumoto 	STAILQ_FOREACH_SAFE(req, &ctrlr->queued_aborts, stailq, tmp) {
17045322f307SShuhei Matsumoto 		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
1705ceaa4ee0SShuhei Matsumoto 		ctrlr->outstanding_aborts++;
17065322f307SShuhei Matsumoto 
17075322f307SShuhei Matsumoto 		nvme_complete_request(req->cb_fn, req->cb_arg, req->qpair, req, &cpl);
17085322f307SShuhei Matsumoto 	}
17095322f307SShuhei Matsumoto }
17105322f307SShuhei Matsumoto 
171121322e01SShuhei Matsumoto static int
171221322e01SShuhei Matsumoto nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
17131010fb3aSDaniel Verkamp {
171481b20a4dSSeth Howell 	if (ctrlr->is_resetting || ctrlr->is_removed) {
17151010fb3aSDaniel Verkamp 		/*
171681b20a4dSSeth Howell 		 * Controller is already resetting or has been removed. Return
17171010fb3aSDaniel Verkamp 		 *  immediately since there is no need to kick off another
17181010fb3aSDaniel Verkamp 		 *  reset in these cases.
17191010fb3aSDaniel Verkamp 		 */
17203387fbbcSJonathan Teh 		return ctrlr->is_resetting ? -EBUSY : -ENXIO;
17211010fb3aSDaniel Verkamp 	}
17221010fb3aSDaniel Verkamp 
1723bc185fe7SDaniel Verkamp 	ctrlr->is_resetting = true;
172481b20a4dSSeth Howell 	ctrlr->is_failed = false;
1725df7c2a22SShuhei Matsumoto 	ctrlr->is_disconnecting = true;
172664454afbSShuhei Matsumoto 	ctrlr->prepare_for_reset = true;
17271010fb3aSDaniel Verkamp 
172801f45ecdSGangCao 	NVME_CTRLR_NOTICELOG(ctrlr, "resetting controller\n");
172920abbe8aSDaniel Verkamp 
173075aa60e1SKonrad Sztyber 	/* Disable keep-alive; it'll be re-enabled as part of the init process */
173175aa60e1SKonrad Sztyber 	ctrlr->keep_alive_interval_ticks = 0;
173275aa60e1SKonrad Sztyber 
17335322f307SShuhei Matsumoto 	/* Abort all of the queued abort requests */
17345322f307SShuhei Matsumoto 	nvme_ctrlr_abort_queued_aborts(ctrlr);
1735193f4f83SBen Walker 
1736f366e261SJim Harris 	nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
1737f366e261SJim Harris 
173824bca2eaSSeth Howell 	ctrlr->adminq->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
1739bb01a089SJim Harris 	nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
174067882ed7SJim Harris 
174121322e01SShuhei Matsumoto 	return 0;
174221322e01SShuhei Matsumoto }
174321322e01SShuhei Matsumoto 
174421322e01SShuhei Matsumoto static void
174521322e01SShuhei Matsumoto nvme_ctrlr_disconnect_done(struct spdk_nvme_ctrlr *ctrlr)
174621322e01SShuhei Matsumoto {
1747df7c2a22SShuhei Matsumoto 	assert(ctrlr->is_failed == false);
1748df7c2a22SShuhei Matsumoto 	ctrlr->is_disconnecting = false;
1749df7c2a22SShuhei Matsumoto 
17507e3a11f9SChangpeng Liu 	/* Doorbell buffer config is invalid during reset */
17517e3a11f9SChangpeng Liu 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
17527e3a11f9SChangpeng Liu 
175364563adaSNiklas Cassel 	/* I/O Command Set Specific Identify Controller data is invalidated during reset */
175464563adaSNiklas Cassel 	nvme_ctrlr_free_iocs_specific_data(ctrlr);
175564563adaSNiklas Cassel 
17569ae19a41SChangpeng Liu 	spdk_bit_array_free(&ctrlr->free_io_qids);
1757af130056SAlexey Marchuk 
1758af130056SAlexey Marchuk 	/* Set the state back to DISCONNECTED to cause a full hardware reset. */
1759af130056SAlexey Marchuk 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISCONNECTED, NVME_TIMEOUT_INFINITE);
176021322e01SShuhei Matsumoto }
176121322e01SShuhei Matsumoto 
176221322e01SShuhei Matsumoto int
176321322e01SShuhei Matsumoto spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
176421322e01SShuhei Matsumoto {
176521322e01SShuhei Matsumoto 	int rc;
176621322e01SShuhei Matsumoto 
1767e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
176821322e01SShuhei Matsumoto 	rc = nvme_ctrlr_disconnect(ctrlr);
1769e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
1770df7c2a22SShuhei Matsumoto 
177121322e01SShuhei Matsumoto 	return rc;
17727a0a2800SShuhei Matsumoto }
17737a0a2800SShuhei Matsumoto 
17747a0a2800SShuhei Matsumoto void
17757a0a2800SShuhei Matsumoto spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
17767a0a2800SShuhei Matsumoto {
1777e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
17787a0a2800SShuhei Matsumoto 
177964454afbSShuhei Matsumoto 	ctrlr->prepare_for_reset = false;
178064454afbSShuhei Matsumoto 
178120abbe8aSDaniel Verkamp 	/* Set the state back to INIT to cause a full hardware reset. */
178220abbe8aSDaniel Verkamp 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
178320abbe8aSDaniel Verkamp 
17847a0a2800SShuhei Matsumoto 	/* Return without releasing ctrlr_lock. ctrlr_lock will be released when
17857a0a2800SShuhei Matsumoto 	 * spdk_nvme_ctrlr_reconnect_poll_async() returns 0.
17867a0a2800SShuhei Matsumoto 	 */
17877a0a2800SShuhei Matsumoto }
17887a0a2800SShuhei Matsumoto 
178919ab54cbSJim Harris int
179019ab54cbSJim Harris nvme_ctrlr_reinitialize_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
179119ab54cbSJim Harris {
179219ab54cbSJim Harris 	bool async;
179319ab54cbSJim Harris 	int rc;
179419ab54cbSJim Harris 
179592180eedSJim Harris 	if (nvme_ctrlr_get_current_process(ctrlr) != qpair->active_proc ||
179692180eedSJim Harris 	    spdk_nvme_ctrlr_is_fabrics(ctrlr) || nvme_qpair_is_admin_queue(qpair)) {
179719ab54cbSJim Harris 		assert(false);
179819ab54cbSJim Harris 		return -EINVAL;
179919ab54cbSJim Harris 	}
180019ab54cbSJim Harris 
180119ab54cbSJim Harris 	/* Force a synchronous connect. */
180219ab54cbSJim Harris 	async = qpair->async;
180319ab54cbSJim Harris 	qpair->async = false;
180419ab54cbSJim Harris 	rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
180519ab54cbSJim Harris 	qpair->async = async;
180619ab54cbSJim Harris 
180719ab54cbSJim Harris 	if (rc != 0) {
180819ab54cbSJim Harris 		qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
180919ab54cbSJim Harris 	}
181019ab54cbSJim Harris 
181119ab54cbSJim Harris 	return rc;
181219ab54cbSJim Harris }
181319ab54cbSJim Harris 
18143387fbbcSJonathan Teh /**
18153387fbbcSJonathan Teh  * This function will be called when the controller is being reinitialized.
18163387fbbcSJonathan Teh  * Note: the ctrlr_lock must be held when calling this function.
18173387fbbcSJonathan Teh  */
18187a0a2800SShuhei Matsumoto int
18197a0a2800SShuhei Matsumoto spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
18203387fbbcSJonathan Teh {
1821e3eeb6bdSBen Walker 	struct spdk_nvme_ns *ns, *tmp_ns;
18223387fbbcSJonathan Teh 	struct spdk_nvme_qpair	*qpair;
18233387fbbcSJonathan Teh 	int rc = 0, rc_tmp = 0;
18243387fbbcSJonathan Teh 
182520abbe8aSDaniel Verkamp 	if (nvme_ctrlr_process_init(ctrlr) != 0) {
182601f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "controller reinitialization failed\n");
182720abbe8aSDaniel Verkamp 		rc = -1;
182820abbe8aSDaniel Verkamp 	}
18293387fbbcSJonathan Teh 	if (ctrlr->state != NVME_CTRLR_STATE_READY && rc != -1) {
18303387fbbcSJonathan Teh 		return -EAGAIN;
18311010fb3aSDaniel Verkamp 	}
18321010fb3aSDaniel Verkamp 
183313f30a25SSeth Howell 	/*
183443b48832SJim Harris 	 * For non-fabrics controllers, the memory locations of the transport qpair
183513f30a25SSeth Howell 	 * don't change when the controller is reset. They simply need to be
183613f30a25SSeth Howell 	 * re-enabled with admin commands to the controller. For fabric
183713f30a25SSeth Howell 	 * controllers we need to disconnect and reconnect the qpair on its
183813f30a25SSeth Howell 	 * own thread outside of the context of the reset.
183913f30a25SSeth Howell 	 */
184043b48832SJim Harris 	if (rc == 0 && !spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
18413272320cSDaniel Verkamp 		/* Reinitialize qpairs */
18423272320cSDaniel Verkamp 		TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
184392180eedSJim Harris 			/* Always clear the qid bit here, even for a foreign qpair. We need
184492180eedSJim Harris 			 * to make sure another process doesn't get the chance to grab that
184592180eedSJim Harris 			 * qid.
184692180eedSJim Harris 			 */
18479ae19a41SChangpeng Liu 			assert(spdk_bit_array_get(ctrlr->free_io_qids, qpair->id));
18489ae19a41SChangpeng Liu 			spdk_bit_array_clear(ctrlr->free_io_qids, qpair->id);
184992180eedSJim Harris 			if (nvme_ctrlr_get_current_process(ctrlr) != qpair->active_proc) {
185092180eedSJim Harris 				/*
185192180eedSJim Harris 				 * We cannot reinitialize a foreign qpair. The qpair's owning
185292180eedSJim Harris 				 * process will take care of it. Set failure reason to FAILURE_RESET
185392180eedSJim Harris 				 * to ensure that happens.
185492180eedSJim Harris 				 */
185592180eedSJim Harris 				qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_RESET;
185692180eedSJim Harris 				continue;
185792180eedSJim Harris 			}
185819ab54cbSJim Harris 			rc_tmp = nvme_ctrlr_reinitialize_io_qpair(ctrlr, qpair);
185930920066SSeth Howell 			if (rc_tmp != 0) {
186030920066SSeth Howell 				rc = rc_tmp;
18613272320cSDaniel Verkamp 			}
18623272320cSDaniel Verkamp 		}
18633272320cSDaniel Verkamp 	}
18643272320cSDaniel Verkamp 
1865e3eeb6bdSBen Walker 	/*
1866e3eeb6bdSBen Walker 	 * Take this opportunity to remove inactive namespaces. During a reset, namespace
1867e3eeb6bdSBen Walker 	 * handles can be invalidated.
1868e3eeb6bdSBen Walker 	 */
1869e3eeb6bdSBen Walker 	RB_FOREACH_SAFE(ns, nvme_ns_tree, &ctrlr->ns, tmp_ns) {
1870e3eeb6bdSBen Walker 		if (!ns->active) {
1871e3eeb6bdSBen Walker 			RB_REMOVE(nvme_ns_tree, &ctrlr->ns, ns);
1872e3eeb6bdSBen Walker 			spdk_free(ns);
1873e3eeb6bdSBen Walker 		}
1874e3eeb6bdSBen Walker 	}
1875e3eeb6bdSBen Walker 
18763e1569e8SSeth Howell 	if (rc) {
18773e1569e8SSeth Howell 		nvme_ctrlr_fail(ctrlr, false);
18783e1569e8SSeth Howell 	}
187981b20a4dSSeth Howell 	ctrlr->is_resetting = false;
18801010fb3aSDaniel Verkamp 
1881e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
18821010fb3aSDaniel Verkamp 
1883550d4867STomasz Kulasek 	if (!ctrlr->cdata.oaes.ns_attribute_notices) {
1884550d4867STomasz Kulasek 		/*
1885550d4867STomasz Kulasek 		 * If the controller doesn't support ns_attribute_notices and
1886550d4867STomasz Kulasek 		 * the namespace attributes change (e.g. the number of namespaces),
1887550d4867STomasz Kulasek 		 * we need to update the system handling the device across the reset.
1888550d4867STomasz Kulasek 		 */
1889550d4867STomasz Kulasek 		nvme_io_msg_ctrlr_update(ctrlr);
1890550d4867STomasz Kulasek 	}
1891550d4867STomasz Kulasek 
18921010fb3aSDaniel Verkamp 	return rc;
18931010fb3aSDaniel Verkamp }
18941010fb3aSDaniel Verkamp 
1895bdc9fa83SShuhei Matsumoto /*
1896bdc9fa83SShuhei Matsumoto  * For PCIe transport, spdk_nvme_ctrlr_disconnect() will do a Controller Level Reset
1897bdc9fa83SShuhei Matsumoto  * (Change CC.EN from 1 to 0) as an operation to disconnect the admin qpair.
1898bdc9fa83SShuhei Matsumoto  * The following two functions are added to do a Controller Level Reset. They have
1899bdc9fa83SShuhei Matsumoto  * to be called under the nvme controller's lock.
1900bdc9fa83SShuhei Matsumoto  */
1901bdc9fa83SShuhei Matsumoto void
1902bdc9fa83SShuhei Matsumoto nvme_ctrlr_disable(struct spdk_nvme_ctrlr *ctrlr)
1903bdc9fa83SShuhei Matsumoto {
1904bdc9fa83SShuhei Matsumoto 	assert(ctrlr->is_disconnecting == true);
1905bdc9fa83SShuhei Matsumoto 
1906bdc9fa83SShuhei Matsumoto 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CHECK_EN, NVME_TIMEOUT_INFINITE);
1907bdc9fa83SShuhei Matsumoto }
1908bdc9fa83SShuhei Matsumoto 
1909bdc9fa83SShuhei Matsumoto int
1910bdc9fa83SShuhei Matsumoto nvme_ctrlr_disable_poll(struct spdk_nvme_ctrlr *ctrlr)
1911bdc9fa83SShuhei Matsumoto {
1912bdc9fa83SShuhei Matsumoto 	int rc = 0;
1913bdc9fa83SShuhei Matsumoto 
1914bdc9fa83SShuhei Matsumoto 	if (nvme_ctrlr_process_init(ctrlr) != 0) {
1915bdc9fa83SShuhei Matsumoto 		NVME_CTRLR_ERRLOG(ctrlr, "failed to disable controller\n");
1916bdc9fa83SShuhei Matsumoto 		rc = -1;
1917bdc9fa83SShuhei Matsumoto 	}
1918bdc9fa83SShuhei Matsumoto 
1919bdc9fa83SShuhei Matsumoto 	if (ctrlr->state != NVME_CTRLR_STATE_DISABLED && rc != -1) {
1920bdc9fa83SShuhei Matsumoto 		return -EAGAIN;
1921bdc9fa83SShuhei Matsumoto 	}
1922bdc9fa83SShuhei Matsumoto 
1923bdc9fa83SShuhei Matsumoto 	return rc;
1924bdc9fa83SShuhei Matsumoto }
1925bdc9fa83SShuhei Matsumoto 
192609c7c768SShuhei Matsumoto static void
192709c7c768SShuhei Matsumoto nvme_ctrlr_fail_io_qpairs(struct spdk_nvme_ctrlr *ctrlr)
192809c7c768SShuhei Matsumoto {
192909c7c768SShuhei Matsumoto 	struct spdk_nvme_qpair	*qpair;
193009c7c768SShuhei Matsumoto 
193109c7c768SShuhei Matsumoto 	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
193209c7c768SShuhei Matsumoto 		qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
193309c7c768SShuhei Matsumoto 	}
193409c7c768SShuhei Matsumoto }
193509c7c768SShuhei Matsumoto 
1936ac24039dSJonathan Teh int
1937ac24039dSJonathan Teh spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
1938ac24039dSJonathan Teh {
19393387fbbcSJonathan Teh 	int rc;
1940ac24039dSJonathan Teh 
1941e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
194221322e01SShuhei Matsumoto 
194321322e01SShuhei Matsumoto 	rc = nvme_ctrlr_disconnect(ctrlr);
194409c7c768SShuhei Matsumoto 	if (rc == 0) {
194509c7c768SShuhei Matsumoto 		nvme_ctrlr_fail_io_qpairs(ctrlr);
194609c7c768SShuhei Matsumoto 	}
194721322e01SShuhei Matsumoto 
1948e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
194921322e01SShuhei Matsumoto 
19503387fbbcSJonathan Teh 	if (rc != 0) {
19513387fbbcSJonathan Teh 		if (rc == -EBUSY) {
19523387fbbcSJonathan Teh 			rc = 0;
19533387fbbcSJonathan Teh 		}
19543387fbbcSJonathan Teh 		return rc;
19553387fbbcSJonathan Teh 	}
19564a73675dSShuhei Matsumoto 
1957df7c2a22SShuhei Matsumoto 	while (1) {
1958df7c2a22SShuhei Matsumoto 		rc = spdk_nvme_ctrlr_process_admin_completions(ctrlr);
1959df7c2a22SShuhei Matsumoto 		if (rc == -ENXIO) {
1960df7c2a22SShuhei Matsumoto 			break;
1961df7c2a22SShuhei Matsumoto 		}
1962df7c2a22SShuhei Matsumoto 	}
1963df7c2a22SShuhei Matsumoto 
19644a73675dSShuhei Matsumoto 	spdk_nvme_ctrlr_reconnect_async(ctrlr);
1965ac24039dSJonathan Teh 
1966ac24039dSJonathan Teh 	while (true) {
19674a73675dSShuhei Matsumoto 		rc = spdk_nvme_ctrlr_reconnect_poll_async(ctrlr);
1968ac24039dSJonathan Teh 		if (rc != -EAGAIN) {
1969ac24039dSJonathan Teh 			break;
1970ac24039dSJonathan Teh 		}
1971ac24039dSJonathan Teh 	}
1972ac24039dSJonathan Teh 
1973ac24039dSJonathan Teh 	return rc;
1974ac24039dSJonathan Teh }
1975ac24039dSJonathan Teh 
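/*
 * Illustrative only: spdk_nvme_ctrlr_reset() above is the synchronous wrapper.
 * A caller that must not block can drive the same sequence itself, deferring
 * the polling to its own event loop (a sketch using the APIs defined in this
 * file):
 *
 *	if (spdk_nvme_ctrlr_disconnect(ctrlr) == 0) {
 *		// Drain admin completions until the disconnected qpair reports -ENXIO.
 *		while (spdk_nvme_ctrlr_process_admin_completions(ctrlr) != -ENXIO) {}
 *		spdk_nvme_ctrlr_reconnect_async(ctrlr);
 *		// Later, from a poller: -EAGAIN means keep polling, anything else is final.
 *		// rc = spdk_nvme_ctrlr_reconnect_poll_async(ctrlr);
 *	}
 */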
19765215fad6SSeth Howell int
197702d3d439SMichael Haeuptle spdk_nvme_ctrlr_reset_subsystem(struct spdk_nvme_ctrlr *ctrlr)
197802d3d439SMichael Haeuptle {
197902d3d439SMichael Haeuptle 	union spdk_nvme_cap_register cap;
198002d3d439SMichael Haeuptle 	int rc = 0;
198102d3d439SMichael Haeuptle 
198202d3d439SMichael Haeuptle 	cap = spdk_nvme_ctrlr_get_regs_cap(ctrlr);
198302d3d439SMichael Haeuptle 	if (cap.bits.nssrs == 0) {
198401f45ecdSGangCao 		NVME_CTRLR_WARNLOG(ctrlr, "subsystem reset is not supported\n");
198502d3d439SMichael Haeuptle 		return -ENOTSUP;
198602d3d439SMichael Haeuptle 	}
198702d3d439SMichael Haeuptle 
198801f45ecdSGangCao 	NVME_CTRLR_NOTICELOG(ctrlr, "resetting subsystem\n");
1989e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
199002d3d439SMichael Haeuptle 	ctrlr->is_resetting = true;
199102d3d439SMichael Haeuptle 	rc = nvme_ctrlr_set_nssr(ctrlr, SPDK_NVME_NSSR_VALUE);
199202d3d439SMichael Haeuptle 	ctrlr->is_resetting = false;
199302d3d439SMichael Haeuptle 
1994e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
199502d3d439SMichael Haeuptle 	/*
199602d3d439SMichael Haeuptle 	 * No more cleanup at this point like in the ctrlr reset. A subsystem reset will cause
199702d3d439SMichael Haeuptle 	 * a hot remove for PCIe transport. The hot remove handling does all the necessary ctrlr cleanup.
199802d3d439SMichael Haeuptle 	 */
199902d3d439SMichael Haeuptle 	return rc;
200002d3d439SMichael Haeuptle }
200102d3d439SMichael Haeuptle 
200202d3d439SMichael Haeuptle int
20035215fad6SSeth Howell spdk_nvme_ctrlr_set_trid(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_transport_id *trid)
20045215fad6SSeth Howell {
20055215fad6SSeth Howell 	int rc = 0;
20065215fad6SSeth Howell 
2007e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
20085215fad6SSeth Howell 
20095215fad6SSeth Howell 	if (ctrlr->is_failed == false) {
20105215fad6SSeth Howell 		rc = -EPERM;
20115215fad6SSeth Howell 		goto out;
20125215fad6SSeth Howell 	}
20135215fad6SSeth Howell 
20145215fad6SSeth Howell 	if (trid->trtype != ctrlr->trid.trtype) {
20155215fad6SSeth Howell 		rc = -EINVAL;
20165215fad6SSeth Howell 		goto out;
20175215fad6SSeth Howell 	}
20185215fad6SSeth Howell 
20195215fad6SSeth Howell 	if (strncmp(trid->subnqn, ctrlr->trid.subnqn, SPDK_NVMF_NQN_MAX_LEN)) {
20205215fad6SSeth Howell 		rc = -EINVAL;
20215215fad6SSeth Howell 		goto out;
20225215fad6SSeth Howell 	}
20235215fad6SSeth Howell 
20245215fad6SSeth Howell 	ctrlr->trid = *trid;
20255215fad6SSeth Howell 
20265215fad6SSeth Howell out:
2027e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
20285215fad6SSeth Howell 	return rc;
20295215fad6SSeth Howell }
20305215fad6SSeth Howell 
203117afd268SJim Harris void
203217afd268SJim Harris spdk_nvme_ctrlr_set_remove_cb(struct spdk_nvme_ctrlr *ctrlr,
203317afd268SJim Harris 			      spdk_nvme_remove_cb remove_cb, void *remove_ctx)
203417afd268SJim Harris {
203517afd268SJim Harris 	if (!spdk_process_is_primary()) {
203617afd268SJim Harris 		return;
203717afd268SJim Harris 	}
203817afd268SJim Harris 
2039e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
20406d306ae5Ssuhua1 	ctrlr->remove_cb = remove_cb;
204117afd268SJim Harris 	ctrlr->cb_ctx = remove_ctx;
2042e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
204317afd268SJim Harris }
204417afd268SJim Harris 
20454a62ec38SKonrad Sztyber int
20464a62ec38SKonrad Sztyber spdk_nvme_ctrlr_set_keys(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ctrlr_key_opts *opts)
20474a62ec38SKonrad Sztyber {
20484a62ec38SKonrad Sztyber 	nvme_ctrlr_lock(ctrlr);
20494a62ec38SKonrad Sztyber 	if (SPDK_GET_FIELD(opts, dhchap_key, ctrlr->opts.dhchap_key) == NULL &&
20504a62ec38SKonrad Sztyber 	    SPDK_GET_FIELD(opts, dhchap_ctrlr_key, ctrlr->opts.dhchap_ctrlr_key) != NULL) {
20514a62ec38SKonrad Sztyber 		NVME_CTRLR_ERRLOG(ctrlr, "DH-HMAC-CHAP controller key requires host key to be set\n");
20524a62ec38SKonrad Sztyber 		nvme_ctrlr_unlock(ctrlr);
20534a62ec38SKonrad Sztyber 		return -EINVAL;
20544a62ec38SKonrad Sztyber 	}
20554a62ec38SKonrad Sztyber 
20564a62ec38SKonrad Sztyber 	ctrlr->opts.dhchap_key =
20574a62ec38SKonrad Sztyber 		SPDK_GET_FIELD(opts, dhchap_key, ctrlr->opts.dhchap_key);
20584a62ec38SKonrad Sztyber 	ctrlr->opts.dhchap_ctrlr_key =
20594a62ec38SKonrad Sztyber 		SPDK_GET_FIELD(opts, dhchap_ctrlr_key, ctrlr->opts.dhchap_ctrlr_key);
20604a62ec38SKonrad Sztyber 	nvme_ctrlr_unlock(ctrlr);
20614a62ec38SKonrad Sztyber 
20624a62ec38SKonrad Sztyber 	return 0;
20634a62ec38SKonrad Sztyber }
20644a62ec38SKonrad Sztyber 
2065e9b5420fSChangpeng Liu static void
2066e9b5420fSChangpeng Liu nvme_ctrlr_identify_done(void *arg, const struct spdk_nvme_cpl *cpl)
20671010fb3aSDaniel Verkamp {
2068e9b5420fSChangpeng Liu 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
20691010fb3aSDaniel Verkamp 
2070a61aff77SChangpeng Liu 	if (spdk_nvme_cpl_is_error(cpl)) {
207101f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "nvme_identify_controller failed!\n");
2072a61aff77SChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
2073a61aff77SChangpeng Liu 		return;
2074a61aff77SChangpeng Liu 	}
2075a61aff77SChangpeng Liu 
20761010fb3aSDaniel Verkamp 	/*
20771010fb3aSDaniel Verkamp 	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
20781010fb3aSDaniel Verkamp 	 *  controller supports.
20791010fb3aSDaniel Verkamp 	 */
20801ffec5d5SDaniel Verkamp 	ctrlr->max_xfer_size = nvme_transport_ctrlr_get_max_xfer_size(ctrlr);
208101f45ecdSGangCao 	NVME_CTRLR_DEBUGLOG(ctrlr, "transport max_xfer_size %u\n", ctrlr->max_xfer_size);
20821010fb3aSDaniel Verkamp 	if (ctrlr->cdata.mdts > 0) {
208384d90484SDaniel Verkamp 		ctrlr->max_xfer_size = spdk_min(ctrlr->max_xfer_size,
2084efedaf17SNiklas Cassel 						ctrlr->min_page_size * (1 << ctrlr->cdata.mdts));
208501f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size);
20861010fb3aSDaniel Verkamp 	}
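	/*
	 * Worked example: MDTS is a power-of-two multiplier of the minimum
	 * page size, so MDTS = 5 with a 4 KiB minimum page caps max_xfer_size
	 * at 4 KiB * 2^5 = 128 KiB. MDTS = 0 means no limit, which is why the
	 * clamp above is only applied when mdts > 0.
	 */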
20871010fb3aSDaniel Verkamp 
208801f45ecdSGangCao 	NVME_CTRLR_DEBUGLOG(ctrlr, "CNTLID 0x%04" PRIx16 "\n", ctrlr->cdata.cntlid);
20899f5fb75dSDaniel Verkamp 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
20909f5fb75dSDaniel Verkamp 		ctrlr->cntlid = ctrlr->cdata.cntlid;
20919f5fb75dSDaniel Verkamp 	} else {
20929f5fb75dSDaniel Verkamp 		/*
20939f5fb75dSDaniel Verkamp 		 * Fabrics controllers should already have CNTLID from the Connect command.
20949f5fb75dSDaniel Verkamp 		 *
20959f5fb75dSDaniel Verkamp 		 * If CNTLID from Connect doesn't match CNTLID in the Identify Controller data,
20969f5fb75dSDaniel Verkamp 		 * trust the one from Connect.
20979f5fb75dSDaniel Verkamp 		 */
20989f5fb75dSDaniel Verkamp 		if (ctrlr->cntlid != ctrlr->cdata.cntlid) {
209901f45ecdSGangCao 			NVME_CTRLR_DEBUGLOG(ctrlr, "Identify CNTLID 0x%04" PRIx16 " != Connect CNTLID 0x%04" PRIx16 "\n",
21009f5fb75dSDaniel Verkamp 					    ctrlr->cdata.cntlid, ctrlr->cntlid);
21019f5fb75dSDaniel Verkamp 		}
21029f5fb75dSDaniel Verkamp 	}
21039f5fb75dSDaniel Verkamp 
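	/*
	 * SGLS bits 1:0 encode SGL support: 0x0 none, 0x1 supported with no
	 * alignment requirement, 0x2 supported with a dword alignment
	 * requirement, and 0x3 reserved (hence the assert below).
	 */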
21044e241cbaSChangpeng Liu 	if (ctrlr->cdata.sgls.supported && !(ctrlr->quirks & NVME_QUIRK_NOT_USE_SGL)) {
2105121aca0bSChangpeng Liu 		assert(ctrlr->cdata.sgls.supported != 0x3);
2106cab35b6bSChangpeng Liu 		ctrlr->flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
2107121aca0bSChangpeng Liu 		if (ctrlr->cdata.sgls.supported == 0x2) {
2108121aca0bSChangpeng Liu 			ctrlr->flags |= SPDK_NVME_CTRLR_SGL_REQUIRES_DWORD_ALIGNMENT;
2109121aca0bSChangpeng Liu 		}
21105c80b1e5SEvgeniy Kochetov 
2111cab35b6bSChangpeng Liu 		ctrlr->max_sges = nvme_transport_ctrlr_get_max_sges(ctrlr);
211201f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "transport max_sges %u\n", ctrlr->max_sges);
2113cab35b6bSChangpeng Liu 	}
2114a61aff77SChangpeng Liu 
2115074c62d0SMarcin Spiewak 	if (ctrlr->cdata.sgls.metadata_address && !(ctrlr->quirks & NVME_QUIRK_NOT_USE_SGL)) {
2116074c62d0SMarcin Spiewak 		ctrlr->flags |= SPDK_NVME_CTRLR_MPTR_SGL_SUPPORTED;
2117074c62d0SMarcin Spiewak 	}
2118074c62d0SMarcin Spiewak 
21197ff9609fSJim Harris 	if (ctrlr->cdata.oacs.security && !(ctrlr->quirks & NVME_QUIRK_OACS_SECURITY)) {
212051ab3788SChunyang Hui 		ctrlr->flags |= SPDK_NVME_CTRLR_SECURITY_SEND_RECV_SUPPORTED;
212151ab3788SChunyang Hui 	}
212251ab3788SChunyang Hui 
21235fba455dSsunshihao520 	if (ctrlr->cdata.oacs.directives) {
21245fba455dSsunshihao520 		ctrlr->flags |= SPDK_NVME_CTRLR_DIRECTIVES_SUPPORTED;
21255fba455dSsunshihao520 	}
21265fba455dSsunshihao520 
212701f45ecdSGangCao 	NVME_CTRLR_DEBUGLOG(ctrlr, "fuses compare and write: %d\n",
212801f45ecdSGangCao 			    ctrlr->cdata.fuses.compare_and_write);
2129535dd2ecSTomasz Kulasek 	if (ctrlr->cdata.fuses.compare_and_write) {
2130535dd2ecSTomasz Kulasek 		ctrlr->flags |= SPDK_NVME_CTRLR_COMPARE_AND_WRITE_SUPPORTED;
2131535dd2ecSTomasz Kulasek 	}
2132535dd2ecSTomasz Kulasek 
21337e68d0baSJim Harris 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
21342706cd42SChangpeng Liu 			     ctrlr->opts.admin_timeout_ms);
2135e9b5420fSChangpeng Liu }
2136e9b5420fSChangpeng Liu 
2137e9b5420fSChangpeng Liu static int
2138e9b5420fSChangpeng Liu nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
2139e9b5420fSChangpeng Liu {
2140e9b5420fSChangpeng Liu 	int	rc;
2141e9b5420fSChangpeng Liu 
21422706cd42SChangpeng Liu 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
21432706cd42SChangpeng Liu 			     ctrlr->opts.admin_timeout_ms);
2144a61aff77SChangpeng Liu 
2145d1db0209SNiklas Cassel 	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0, 0,
2146e9b5420fSChangpeng Liu 				     &ctrlr->cdata, sizeof(ctrlr->cdata),
2147a61aff77SChangpeng Liu 				     nvme_ctrlr_identify_done, ctrlr);
2148e9b5420fSChangpeng Liu 	if (rc != 0) {
2149a61aff77SChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
2150e9b5420fSChangpeng Liu 		return rc;
2151e9b5420fSChangpeng Liu 	}
2152e9b5420fSChangpeng Liu 
21531010fb3aSDaniel Verkamp 	return 0;
21541010fb3aSDaniel Verkamp }
21551010fb3aSDaniel Verkamp 
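/*
 * nvme_ctrlr_identify() above illustrates the pattern used throughout this
 * file's init sequence: enter a WAIT_FOR_* state, submit the admin command
 * with a completion callback, and let the callback advance the state machine
 * (or park it in NVME_CTRLR_STATE_ERROR). A sketch of the pattern, with
 * placeholder names:
 *
 *	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_X, timeout_ms);
 *	rc = nvme_ctrlr_cmd_x(ctrlr, ..., nvme_ctrlr_x_done, ctrlr);
 *	// nvme_ctrlr_x_done() then sets the next state based on the cpl.
 */
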
215664563adaSNiklas Cassel static void
215738d59d8bSNiklas Cassel nvme_ctrlr_get_zns_cmd_and_effects_log_done(void *arg, const struct spdk_nvme_cpl *cpl)
215838d59d8bSNiklas Cassel {
215938d59d8bSNiklas Cassel 	struct spdk_nvme_cmds_and_effect_log_page *log_page;
216038d59d8bSNiklas Cassel 	struct spdk_nvme_ctrlr *ctrlr = arg;
216138d59d8bSNiklas Cassel 
216238d59d8bSNiklas Cassel 	if (spdk_nvme_cpl_is_error(cpl)) {
216301f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_get_zns_cmd_and_effects_log failed!\n");
216438d59d8bSNiklas Cassel 		spdk_free(ctrlr->tmp_ptr);
216538d59d8bSNiklas Cassel 		ctrlr->tmp_ptr = NULL;
216638d59d8bSNiklas Cassel 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
216738d59d8bSNiklas Cassel 		return;
216838d59d8bSNiklas Cassel 	}
216938d59d8bSNiklas Cassel 
217038d59d8bSNiklas Cassel 	log_page = ctrlr->tmp_ptr;
217138d59d8bSNiklas Cassel 
217238d59d8bSNiklas Cassel 	if (log_page->io_cmds_supported[SPDK_NVME_OPC_ZONE_APPEND].csupp) {
217338d59d8bSNiklas Cassel 		ctrlr->flags |= SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED;
217438d59d8bSNiklas Cassel 	}
217538d59d8bSNiklas Cassel 	spdk_free(ctrlr->tmp_ptr);
217638d59d8bSNiklas Cassel 	ctrlr->tmp_ptr = NULL;
217738d59d8bSNiklas Cassel 
217838d59d8bSNiklas Cassel 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES, ctrlr->opts.admin_timeout_ms);
217938d59d8bSNiklas Cassel }
218038d59d8bSNiklas Cassel 
218138d59d8bSNiklas Cassel static int
218238d59d8bSNiklas Cassel nvme_ctrlr_get_zns_cmd_and_effects_log(struct spdk_nvme_ctrlr *ctrlr)
218338d59d8bSNiklas Cassel {
218438d59d8bSNiklas Cassel 	int rc;
218538d59d8bSNiklas Cassel 
218638d59d8bSNiklas Cassel 	assert(!ctrlr->tmp_ptr);
218738d59d8bSNiklas Cassel 	ctrlr->tmp_ptr = spdk_zmalloc(sizeof(struct spdk_nvme_cmds_and_effect_log_page), 64, NULL,
2188186b109dSJim Harris 				      SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
218938d59d8bSNiklas Cassel 	if (!ctrlr->tmp_ptr) {
219038d59d8bSNiklas Cassel 		rc = -ENOMEM;
219138d59d8bSNiklas Cassel 		goto error;
219238d59d8bSNiklas Cassel 	}
219338d59d8bSNiklas Cassel 
219438d59d8bSNiklas Cassel 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG,
219538d59d8bSNiklas Cassel 			     ctrlr->opts.admin_timeout_ms);
219638d59d8bSNiklas Cassel 
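	/*
	 * For Get Log Page, the Command Set Identifier occupies bits 31:24 of
	 * CDW14, so the ZNS CSI is shifted into place in the last dword
	 * argument below.
	 */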
219738d59d8bSNiklas Cassel 	rc = spdk_nvme_ctrlr_cmd_get_log_page_ext(ctrlr, SPDK_NVME_LOG_COMMAND_EFFECTS_LOG,
219838d59d8bSNiklas Cassel 			0, ctrlr->tmp_ptr, sizeof(struct spdk_nvme_cmds_and_effect_log_page),
219938d59d8bSNiklas Cassel 			0, 0, 0, SPDK_NVME_CSI_ZNS << 24,
220038d59d8bSNiklas Cassel 			nvme_ctrlr_get_zns_cmd_and_effects_log_done, ctrlr);
220138d59d8bSNiklas Cassel 	if (rc != 0) {
220238d59d8bSNiklas Cassel 		goto error;
220338d59d8bSNiklas Cassel 	}
220438d59d8bSNiklas Cassel 
220538d59d8bSNiklas Cassel 	return 0;
220638d59d8bSNiklas Cassel 
220738d59d8bSNiklas Cassel error:
220838d59d8bSNiklas Cassel 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
220938d59d8bSNiklas Cassel 	spdk_free(ctrlr->tmp_ptr);
221038d59d8bSNiklas Cassel 	ctrlr->tmp_ptr = NULL;
221138d59d8bSNiklas Cassel 	return rc;
221238d59d8bSNiklas Cassel }
221338d59d8bSNiklas Cassel 
221438d59d8bSNiklas Cassel static void
221564563adaSNiklas Cassel nvme_ctrlr_identify_zns_specific_done(void *arg, const struct spdk_nvme_cpl *cpl)
221664563adaSNiklas Cassel {
221764563adaSNiklas Cassel 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
221864563adaSNiklas Cassel 
221964563adaSNiklas Cassel 	if (spdk_nvme_cpl_is_error(cpl)) {
222064563adaSNiklas Cassel 		/* no need to print an error, the controller simply does not support ZNS */
222164563adaSNiklas Cassel 		nvme_ctrlr_free_zns_specific_data(ctrlr);
222238d59d8bSNiklas Cassel 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
222338d59d8bSNiklas Cassel 				     ctrlr->opts.admin_timeout_ms);
222438d59d8bSNiklas Cassel 		return;
222564563adaSNiklas Cassel 	}
222664563adaSNiklas Cassel 
222723a19169SNiklas Cassel 	/* A zero zasl value means use mdts */
222823a19169SNiklas Cassel 	if (ctrlr->cdata_zns->zasl) {
222923a19169SNiklas Cassel 		uint32_t max_append = ctrlr->min_page_size * (1 << ctrlr->cdata_zns->zasl);
223023a19169SNiklas Cassel 		ctrlr->max_zone_append_size = spdk_min(ctrlr->max_xfer_size, max_append);
223123a19169SNiklas Cassel 	} else {
223223a19169SNiklas Cassel 		ctrlr->max_zone_append_size = ctrlr->max_xfer_size;
223323a19169SNiklas Cassel 	}
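	/*
	 * Worked example: like MDTS, ZASL is a power-of-two multiplier of the
	 * minimum page size, so ZASL = 3 with a 4 KiB minimum page allows
	 * 4 KiB * 2^3 = 32 KiB zone appends, further clamped to max_xfer_size.
	 */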
223423a19169SNiklas Cassel 
223538d59d8bSNiklas Cassel 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG,
223664563adaSNiklas Cassel 			     ctrlr->opts.admin_timeout_ms);
223764563adaSNiklas Cassel }
223864563adaSNiklas Cassel 
223964563adaSNiklas Cassel /**
224064563adaSNiklas Cassel  * This function will try to fetch the I/O Command Specific Controller data structure for
224164563adaSNiklas Cassel  * each I/O Command Set supported by SPDK.
224264563adaSNiklas Cassel  *
224364563adaSNiklas Cassel  * If an I/O Command Set is not supported by the controller, "Invalid Field in Command"
224464563adaSNiklas Cassel  * will be returned. Since we are fetching in an exploratory way, getting an error back
224564563adaSNiklas Cassel  * from the controller should not be treated as fatal.
224664563adaSNiklas Cassel  *
224764563adaSNiklas Cassel  * I/O Command Sets not supported by SPDK will be skipped (e.g. Key Value Command Set).
224864563adaSNiklas Cassel  *
224964563adaSNiklas Cassel  * I/O Command Sets without an IOCS-specific data structure (i.e. a zero-filled IOCS-specific
225064563adaSNiklas Cassel  * data structure) will be skipped (e.g. NVM Command Set, Key Value Command Set).
225164563adaSNiklas Cassel  */
225264563adaSNiklas Cassel static int
225364563adaSNiklas Cassel nvme_ctrlr_identify_iocs_specific(struct spdk_nvme_ctrlr *ctrlr)
225464563adaSNiklas Cassel {
225564563adaSNiklas Cassel 	int	rc;
225664563adaSNiklas Cassel 
225764563adaSNiklas Cassel 	if (!nvme_ctrlr_multi_iocs_enabled(ctrlr)) {
225864563adaSNiklas Cassel 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
225964563adaSNiklas Cassel 				     ctrlr->opts.admin_timeout_ms);
226064563adaSNiklas Cassel 		return 0;
226164563adaSNiklas Cassel 	}
226264563adaSNiklas Cassel 
226364563adaSNiklas Cassel 	/*
226464563adaSNiklas Cassel 	 * Since SPDK currently only needs to fetch a single Command Set, keep the code here,
226564563adaSNiklas Cassel 	 * instead of creating multiple NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC substates,
226664563adaSNiklas Cassel 	 * which would require additional functions and complexity for no good reason.
226764563adaSNiklas Cassel 	 */
226864563adaSNiklas Cassel 	assert(!ctrlr->cdata_zns);
2269186b109dSJim Harris 	ctrlr->cdata_zns = spdk_zmalloc(sizeof(*ctrlr->cdata_zns), 64, NULL, SPDK_ENV_NUMA_ID_ANY,
227064563adaSNiklas Cassel 					SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
227164563adaSNiklas Cassel 	if (!ctrlr->cdata_zns) {
227264563adaSNiklas Cassel 		rc = -ENOMEM;
227364563adaSNiklas Cassel 		goto error;
227464563adaSNiklas Cassel 	}
227564563adaSNiklas Cassel 
227664563adaSNiklas Cassel 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC,
227764563adaSNiklas Cassel 			     ctrlr->opts.admin_timeout_ms);
227864563adaSNiklas Cassel 
227964563adaSNiklas Cassel 	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR_IOCS, 0, 0, SPDK_NVME_CSI_ZNS,
228064563adaSNiklas Cassel 				     ctrlr->cdata_zns, sizeof(*ctrlr->cdata_zns),
228164563adaSNiklas Cassel 				     nvme_ctrlr_identify_zns_specific_done, ctrlr);
228264563adaSNiklas Cassel 	if (rc != 0) {
228364563adaSNiklas Cassel 		goto error;
228464563adaSNiklas Cassel 	}
228564563adaSNiklas Cassel 
228664563adaSNiklas Cassel 	return 0;
228764563adaSNiklas Cassel 
228864563adaSNiklas Cassel error:
228964563adaSNiklas Cassel 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
229064563adaSNiklas Cassel 	nvme_ctrlr_free_zns_specific_data(ctrlr);
229164563adaSNiklas Cassel 	return rc;
229264563adaSNiklas Cassel }
229364563adaSNiklas Cassel 
2294842ae79aSJacek Kalwas enum nvme_active_ns_state {
2295842ae79aSJacek Kalwas 	NVME_ACTIVE_NS_STATE_IDLE,
2296842ae79aSJacek Kalwas 	NVME_ACTIVE_NS_STATE_PROCESSING,
2297842ae79aSJacek Kalwas 	NVME_ACTIVE_NS_STATE_DONE,
2298842ae79aSJacek Kalwas 	NVME_ACTIVE_NS_STATE_ERROR
2299842ae79aSJacek Kalwas };
2300842ae79aSJacek Kalwas 
230155e0ec89SJacek Kalwas typedef void (*nvme_active_ns_ctx_deleter)(struct nvme_active_ns_ctx *);
230255e0ec89SJacek Kalwas 
2303842ae79aSJacek Kalwas struct nvme_active_ns_ctx {
2304842ae79aSJacek Kalwas 	struct spdk_nvme_ctrlr *ctrlr;
2305c7888feeSBen Walker 	uint32_t page_count;
2306842ae79aSJacek Kalwas 	uint32_t next_nsid;
2307842ae79aSJacek Kalwas 	uint32_t *new_ns_list;
230855e0ec89SJacek Kalwas 	nvme_active_ns_ctx_deleter deleter;
2309842ae79aSJacek Kalwas 
2310842ae79aSJacek Kalwas 	enum nvme_active_ns_state state;
2311842ae79aSJacek Kalwas };
2312842ae79aSJacek Kalwas 
2313842ae79aSJacek Kalwas static struct nvme_active_ns_ctx *
231455e0ec89SJacek Kalwas nvme_active_ns_ctx_create(struct spdk_nvme_ctrlr *ctrlr, nvme_active_ns_ctx_deleter deleter)
2315842ae79aSJacek Kalwas {
2316842ae79aSJacek Kalwas 	struct nvme_active_ns_ctx *ctx;
231706fbf4b3SEd Rodriguez 	uint32_t *new_ns_list = NULL;
231806fbf4b3SEd Rodriguez 
2319842ae79aSJacek Kalwas 	ctx = calloc(1, sizeof(*ctx));
2320842ae79aSJacek Kalwas 	if (!ctx) {
232101f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate nvme_active_ns_ctx!\n");
2322842ae79aSJacek Kalwas 		return NULL;
23235aace139SLiu Xiaodong 	}
232406fbf4b3SEd Rodriguez 
2325aebbce25SEvgeniy Kochetov 	new_ns_list = spdk_zmalloc(sizeof(struct spdk_nvme_ns_list), ctrlr->page_size,
2326aebbce25SEvgeniy Kochetov 				   NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_SHARE);
232706fbf4b3SEd Rodriguez 	if (!new_ns_list) {
232801f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate active_ns_list!\n");
2329842ae79aSJacek Kalwas 		free(ctx);
2330842ae79aSJacek Kalwas 		return NULL;
2331842ae79aSJacek Kalwas 	}
2332c4bb0ea6SDaniel Verkamp 
2333c7888feeSBen Walker 	ctx->page_count = 1;
2334842ae79aSJacek Kalwas 	ctx->new_ns_list = new_ns_list;
2335842ae79aSJacek Kalwas 	ctx->ctrlr = ctrlr;
233655e0ec89SJacek Kalwas 	ctx->deleter = deleter;
2337842ae79aSJacek Kalwas 
2338842ae79aSJacek Kalwas 	return ctx;
23398818ace2SAlexey Marchuk }
23408818ace2SAlexey Marchuk 
2341842ae79aSJacek Kalwas static void
2342842ae79aSJacek Kalwas nvme_active_ns_ctx_destroy(struct nvme_active_ns_ctx *ctx)
2343842ae79aSJacek Kalwas {
2344842ae79aSJacek Kalwas 	spdk_free(ctx->new_ns_list);
2345842ae79aSJacek Kalwas 	free(ctx);
2346842ae79aSJacek Kalwas }
2347842ae79aSJacek Kalwas 
2348f555a8afSBen Walker static int
2349f555a8afSBen Walker nvme_ctrlr_destruct_namespace(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
2350f555a8afSBen Walker {
2351e7602c15SBen Walker 	struct spdk_nvme_ns tmp, *ns;
2352f555a8afSBen Walker 
2353f555a8afSBen Walker 	assert(ctrlr != NULL);
2354f555a8afSBen Walker 
2355e7602c15SBen Walker 	tmp.id = nsid;
2356e7602c15SBen Walker 	ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
2357b4dace73SBen Walker 	if (ns == NULL) {
2358e7602c15SBen Walker 		return -EINVAL;
2359b4dace73SBen Walker 	}
2360b4dace73SBen Walker 
2361f555a8afSBen Walker 	nvme_ns_destruct(ns);
2362517b5572SBen Walker 	ns->active = false;
2363f555a8afSBen Walker 
2364f555a8afSBen Walker 	return 0;
2365f555a8afSBen Walker }
2366f555a8afSBen Walker 
236793708637SBen Walker static int
236893708637SBen Walker nvme_ctrlr_construct_namespace(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
236993708637SBen Walker {
237093708637SBen Walker 	struct spdk_nvme_ns *ns;
237193708637SBen Walker 
2372517b5572SBen Walker 	if (nsid < 1 || nsid > ctrlr->cdata.nn) {
237393708637SBen Walker 		return -EINVAL;
237493708637SBen Walker 	}
237593708637SBen Walker 
2376b4dace73SBen Walker 	/* Namespaces are constructed on demand, so simply request it. */
2377b4dace73SBen Walker 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
2378b4dace73SBen Walker 	if (ns == NULL) {
2379b4dace73SBen Walker 		return -ENOMEM;
2380b4dace73SBen Walker 	}
2381b4dace73SBen Walker 
2382517b5572SBen Walker 	ns->active = true;
2383517b5572SBen Walker 
2384b4dace73SBen Walker 	return 0;
238593708637SBen Walker }
238693708637SBen Walker 
2387842ae79aSJacek Kalwas static void
2388517b5572SBen Walker nvme_ctrlr_identify_active_ns_swap(struct spdk_nvme_ctrlr *ctrlr, uint32_t *new_ns_list,
2389c7888feeSBen Walker 				   size_t max_entries)
2390842ae79aSJacek Kalwas {
239184688fdbSBen Walker 	uint32_t active_ns_count = 0;
2392c7888feeSBen Walker 	size_t i;
2393517b5572SBen Walker 	uint32_t nsid;
2394517b5572SBen Walker 	struct spdk_nvme_ns *ns, *tmp_ns;
2395b4dace73SBen Walker 	int rc;
2396aebbce25SEvgeniy Kochetov 
2397b4dace73SBen Walker 	/* First, remove namespaces that no longer exist */
2398517b5572SBen Walker 	RB_FOREACH_SAFE(ns, nvme_ns_tree, &ctrlr->ns, tmp_ns) {
2399517b5572SBen Walker 		nsid = new_ns_list[0];
2400b4dace73SBen Walker 		active_ns_count = 0;
2401517b5572SBen Walker 		while (nsid != 0) {
2402517b5572SBen Walker 			if (nsid == ns->id) {
2403c7888feeSBen Walker 				break;
2404c7888feeSBen Walker 			}
240584688fdbSBen Walker 
2406517b5572SBen Walker 			nsid = new_ns_list[active_ns_count++];
2407b4dace73SBen Walker 		}
2408b4dace73SBen Walker 
2409517b5572SBen Walker 		if (nsid != ns->id) {
2410b4dace73SBen Walker 			/* Did not find this namespace id in the new list. */
2411517b5572SBen Walker 			NVME_CTRLR_DEBUGLOG(ctrlr, "Namespace %u was removed\n", ns->id);
2412517b5572SBen Walker 			nvme_ctrlr_destruct_namespace(ctrlr, ns->id);
2413b4dace73SBen Walker 		}
2414b4dace73SBen Walker 	}
2415b4dace73SBen Walker 
2416b4dace73SBen Walker 	/* Next, add new namespaces */
2417b4dace73SBen Walker 	active_ns_count = 0;
2418b4dace73SBen Walker 	for (i = 0; i < max_entries; i++) {
2419517b5572SBen Walker 		nsid = new_ns_list[active_ns_count];
2420b4dace73SBen Walker 
2421b4dace73SBen Walker 		if (nsid == 0) {
2422b4dace73SBen Walker 			break;
2423b4dace73SBen Walker 		}
2424b4dace73SBen Walker 
2425b4dace73SBen Walker 		/* If the namespace already exists, this will not construct it a second time. */
2426b4dace73SBen Walker 		rc = nvme_ctrlr_construct_namespace(ctrlr, nsid);
2427b4dace73SBen Walker 		if (rc != 0) {
2428b4dace73SBen Walker 			/* We can't easily handle a failure here. But just move on. */
2429b4dace73SBen Walker 			assert(false);
2430b4dace73SBen Walker 			NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to allocate a namespace object.\n");
2431b4dace73SBen Walker 			continue;
2432b4dace73SBen Walker 		}
2433b4dace73SBen Walker 
243484688fdbSBen Walker 		active_ns_count++;
2435c7888feeSBen Walker 	}
2436c7888feeSBen Walker 
243784688fdbSBen Walker 	ctrlr->active_ns_count = active_ns_count;
2438842ae79aSJacek Kalwas }
2439842ae79aSJacek Kalwas 
2440842ae79aSJacek Kalwas static void
2441842ae79aSJacek Kalwas nvme_ctrlr_identify_active_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
2442842ae79aSJacek Kalwas {
2443842ae79aSJacek Kalwas 	struct nvme_active_ns_ctx *ctx = arg;
2444aebbce25SEvgeniy Kochetov 	uint32_t *new_ns_list = NULL;
2445842ae79aSJacek Kalwas 
2446842ae79aSJacek Kalwas 	if (spdk_nvme_cpl_is_error(cpl)) {
2447842ae79aSJacek Kalwas 		ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
244855e0ec89SJacek Kalwas 		goto out;
2449842ae79aSJacek Kalwas 	}
2450842ae79aSJacek Kalwas 
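	/*
	 * Each Identify Active Namespace List page holds 1024 NSIDs
	 * (sizeof(struct spdk_nvme_ns_list) / sizeof(uint32_t)). A non-zero
	 * final entry means the list may continue, so request another page
	 * starting after that NSID; a zero entry terminates the list.
	 */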
2451c7888feeSBen Walker 	ctx->next_nsid = ctx->new_ns_list[1024 * ctx->page_count - 1];
2452aebbce25SEvgeniy Kochetov 	if (ctx->next_nsid == 0) {
2453842ae79aSJacek Kalwas 		ctx->state = NVME_ACTIVE_NS_STATE_DONE;
245455e0ec89SJacek Kalwas 		goto out;
2455842ae79aSJacek Kalwas 	}
2456842ae79aSJacek Kalwas 
2457c7888feeSBen Walker 	ctx->page_count++;
2458aebbce25SEvgeniy Kochetov 	new_ns_list = spdk_realloc(ctx->new_ns_list,
2459c7888feeSBen Walker 				   ctx->page_count * sizeof(struct spdk_nvme_ns_list),
2460aebbce25SEvgeniy Kochetov 				   ctx->ctrlr->page_size);
2461aebbce25SEvgeniy Kochetov 	if (!new_ns_list) {
2462aebbce25SEvgeniy Kochetov 		NVME_CTRLR_ERRLOG(ctx->ctrlr, "Failed to reallocate active_ns_list!\n");
2463aebbce25SEvgeniy Kochetov 		ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
2464aebbce25SEvgeniy Kochetov 		goto out;
2465aebbce25SEvgeniy Kochetov 	}
2466aebbce25SEvgeniy Kochetov 
2467aebbce25SEvgeniy Kochetov 	ctx->new_ns_list = new_ns_list;
2468842ae79aSJacek Kalwas 	nvme_ctrlr_identify_active_ns_async(ctx);
246955e0ec89SJacek Kalwas 	return;
247055e0ec89SJacek Kalwas 
247155e0ec89SJacek Kalwas out:
247255e0ec89SJacek Kalwas 	if (ctx->deleter) {
247355e0ec89SJacek Kalwas 		ctx->deleter(ctx);
247455e0ec89SJacek Kalwas 	}
2475842ae79aSJacek Kalwas }
2476842ae79aSJacek Kalwas 
2477842ae79aSJacek Kalwas static void
2478842ae79aSJacek Kalwas nvme_ctrlr_identify_active_ns_async(struct nvme_active_ns_ctx *ctx)
2479842ae79aSJacek Kalwas {
2480842ae79aSJacek Kalwas 	struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
2481842ae79aSJacek Kalwas 	uint32_t i;
2482842ae79aSJacek Kalwas 	int rc;
2483842ae79aSJacek Kalwas 
2484aebbce25SEvgeniy Kochetov 	if (ctrlr->cdata.nn == 0) {
2485842ae79aSJacek Kalwas 		ctx->state = NVME_ACTIVE_NS_STATE_DONE;
248655e0ec89SJacek Kalwas 		goto out;
2487842ae79aSJacek Kalwas 	}
2488842ae79aSJacek Kalwas 
2489415d6640SGangCao 	assert(ctx->new_ns_list != NULL);
2490415d6640SGangCao 
249106fbf4b3SEd Rodriguez 	/*
2492842ae79aSJacek Kalwas 	 * If the controller doesn't support the active ns list (CNS 0x02), dummy up
2493842ae79aSJacek Kalwas 	 * an active ns list, i.e. report all namespaces as active.
249406fbf4b3SEd Rodriguez 	 */
2495842ae79aSJacek Kalwas 	if (ctrlr->vs.raw < SPDK_NVME_VERSION(1, 1, 0) || ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS) {
2496aebbce25SEvgeniy Kochetov 		uint32_t *new_ns_list;
2497aebbce25SEvgeniy Kochetov 
2498aebbce25SEvgeniy Kochetov 		/*
2499aebbce25SEvgeniy Kochetov 		 * Active NS list must always end with zero element.
2500aebbce25SEvgeniy Kochetov 		 * So, we allocate for cdata.nn+1.
2501aebbce25SEvgeniy Kochetov 		 */
2502c7888feeSBen Walker 		ctx->page_count = spdk_divide_round_up(ctrlr->cdata.nn + 1,
2503aebbce25SEvgeniy Kochetov 						       sizeof(struct spdk_nvme_ns_list) / sizeof(new_ns_list[0]));
2504aebbce25SEvgeniy Kochetov 		new_ns_list = spdk_realloc(ctx->new_ns_list,
2505c7888feeSBen Walker 					   ctx->page_count * sizeof(struct spdk_nvme_ns_list),
2506aebbce25SEvgeniy Kochetov 					   ctx->ctrlr->page_size);
2507aebbce25SEvgeniy Kochetov 		if (!new_ns_list) {
2508aebbce25SEvgeniy Kochetov 			NVME_CTRLR_ERRLOG(ctrlr, "Failed to reallocate active_ns_list!\n");
2509aebbce25SEvgeniy Kochetov 			ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
2510aebbce25SEvgeniy Kochetov 			goto out;
2511aebbce25SEvgeniy Kochetov 		}
2512aebbce25SEvgeniy Kochetov 
2513aebbce25SEvgeniy Kochetov 		ctx->new_ns_list = new_ns_list;
2514aebbce25SEvgeniy Kochetov 		ctx->new_ns_list[ctrlr->cdata.nn] = 0;
2515aebbce25SEvgeniy Kochetov 		for (i = 0; i < ctrlr->cdata.nn; i++) {
2516842ae79aSJacek Kalwas 			ctx->new_ns_list[i] = i + 1;
2517842ae79aSJacek Kalwas 		}
2518842ae79aSJacek Kalwas 
2519842ae79aSJacek Kalwas 		ctx->state = NVME_ACTIVE_NS_STATE_DONE;
252055e0ec89SJacek Kalwas 		goto out;
2521842ae79aSJacek Kalwas 	}
2522842ae79aSJacek Kalwas 
2523842ae79aSJacek Kalwas 	ctx->state = NVME_ACTIVE_NS_STATE_PROCESSING;
2524d1db0209SNiklas Cassel 	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST, 0, ctx->next_nsid, 0,
2525c7888feeSBen Walker 				     &ctx->new_ns_list[1024 * (ctx->page_count - 1)], sizeof(struct spdk_nvme_ns_list),
2526842ae79aSJacek Kalwas 				     nvme_ctrlr_identify_active_ns_async_done, ctx);
252706fbf4b3SEd Rodriguez 	if (rc != 0) {
2528842ae79aSJacek Kalwas 		ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
252955e0ec89SJacek Kalwas 		goto out;
253006fbf4b3SEd Rodriguez 	}
253155e0ec89SJacek Kalwas 
253255e0ec89SJacek Kalwas 	return;
253355e0ec89SJacek Kalwas 
253455e0ec89SJacek Kalwas out:
253555e0ec89SJacek Kalwas 	if (ctx->deleter) {
253655e0ec89SJacek Kalwas 		ctx->deleter(ctx);
253755e0ec89SJacek Kalwas 	}
253855e0ec89SJacek Kalwas }
253955e0ec89SJacek Kalwas 
254055e0ec89SJacek Kalwas static void
254155e0ec89SJacek Kalwas _nvme_active_ns_ctx_deleter(struct nvme_active_ns_ctx *ctx)
254255e0ec89SJacek Kalwas {
254355e0ec89SJacek Kalwas 	struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
2544dbde5eddSBen Walker 	struct spdk_nvme_ns *ns;
254555e0ec89SJacek Kalwas 
254655e0ec89SJacek Kalwas 	if (ctx->state == NVME_ACTIVE_NS_STATE_ERROR) {
254755e0ec89SJacek Kalwas 		nvme_active_ns_ctx_destroy(ctx);
254855e0ec89SJacek Kalwas 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
254955e0ec89SJacek Kalwas 		return;
255055e0ec89SJacek Kalwas 	}
255155e0ec89SJacek Kalwas 
255255e0ec89SJacek Kalwas 	assert(ctx->state == NVME_ACTIVE_NS_STATE_DONE);
255372eab360SBen Walker 
2554dbde5eddSBen Walker 	RB_FOREACH(ns, nvme_ns_tree, &ctrlr->ns) {
2555dbde5eddSBen Walker 		nvme_ns_free_iocs_specific_data(ns);
255672eab360SBen Walker 	}
255772eab360SBen Walker 
2558517b5572SBen Walker 	nvme_ctrlr_identify_active_ns_swap(ctrlr, ctx->new_ns_list, ctx->page_count * 1024);
255955e0ec89SJacek Kalwas 	nvme_active_ns_ctx_destroy(ctx);
256072eab360SBen Walker 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS, ctrlr->opts.admin_timeout_ms);
256155e0ec89SJacek Kalwas }
256255e0ec89SJacek Kalwas 
256355e0ec89SJacek Kalwas static void
256455e0ec89SJacek Kalwas _nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
256555e0ec89SJacek Kalwas {
256655e0ec89SJacek Kalwas 	struct nvme_active_ns_ctx *ctx;
256755e0ec89SJacek Kalwas 
256855e0ec89SJacek Kalwas 	ctx = nvme_active_ns_ctx_create(ctrlr, _nvme_active_ns_ctx_deleter);
256955e0ec89SJacek Kalwas 	if (!ctx) {
257055e0ec89SJacek Kalwas 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
257155e0ec89SJacek Kalwas 		return;
257255e0ec89SJacek Kalwas 	}
257355e0ec89SJacek Kalwas 
257455e0ec89SJacek Kalwas 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS,
257555e0ec89SJacek Kalwas 			     ctrlr->opts.admin_timeout_ms);
257655e0ec89SJacek Kalwas 	nvme_ctrlr_identify_active_ns_async(ctx);
257706fbf4b3SEd Rodriguez }
2578842ae79aSJacek Kalwas 
2579842ae79aSJacek Kalwas int
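/*
 * Synchronous variant of _nvme_ctrlr_identify_active_ns(): instead of being
 * driven by the init state machine, it polls the admin queue inline until the
 * shared async context leaves the PROCESSING state, so it can be called
 * outside of controller initialization.
 */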
2580842ae79aSJacek Kalwas nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
2581842ae79aSJacek Kalwas {
2582842ae79aSJacek Kalwas 	struct nvme_active_ns_ctx *ctx;
2583842ae79aSJacek Kalwas 	int rc;
2584842ae79aSJacek Kalwas 
258555e0ec89SJacek Kalwas 	ctx = nvme_active_ns_ctx_create(ctrlr, NULL);
2586842ae79aSJacek Kalwas 	if (!ctx) {
2587842ae79aSJacek Kalwas 		return -ENOMEM;
2588842ae79aSJacek Kalwas 	}
2589842ae79aSJacek Kalwas 
2590842ae79aSJacek Kalwas 	nvme_ctrlr_identify_active_ns_async(ctx);
2591842ae79aSJacek Kalwas 	while (ctx->state == NVME_ACTIVE_NS_STATE_PROCESSING) {
2592842ae79aSJacek Kalwas 		rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
2593842ae79aSJacek Kalwas 		if (rc < 0) {
2594842ae79aSJacek Kalwas 			ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
259506fbf4b3SEd Rodriguez 			break;
259606fbf4b3SEd Rodriguez 		}
259706fbf4b3SEd Rodriguez 	}
259806fbf4b3SEd Rodriguez 
2599842ae79aSJacek Kalwas 	if (ctx->state == NVME_ACTIVE_NS_STATE_ERROR) {
2600842ae79aSJacek Kalwas 		nvme_active_ns_ctx_destroy(ctx);
2601842ae79aSJacek Kalwas 		return -ENXIO;
260206fbf4b3SEd Rodriguez 	}
260306fbf4b3SEd Rodriguez 
2604842ae79aSJacek Kalwas 	assert(ctx->state == NVME_ACTIVE_NS_STATE_DONE);
2605517b5572SBen Walker 	nvme_ctrlr_identify_active_ns_swap(ctrlr, ctx->new_ns_list, ctx->page_count * 1024);
2606842ae79aSJacek Kalwas 	nvme_active_ns_ctx_destroy(ctx);
260706fbf4b3SEd Rodriguez 
260806fbf4b3SEd Rodriguez 	return 0;
260906fbf4b3SEd Rodriguez }
261006fbf4b3SEd Rodriguez 
261192bf76c9SChangpeng Liu static void
261292bf76c9SChangpeng Liu nvme_ctrlr_identify_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
261392bf76c9SChangpeng Liu {
261492bf76c9SChangpeng Liu 	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
261592bf76c9SChangpeng Liu 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
261692bf76c9SChangpeng Liu 	uint32_t nsid;
261792bf76c9SChangpeng Liu 	int rc;
261892bf76c9SChangpeng Liu 
261992bf76c9SChangpeng Liu 	if (spdk_nvme_cpl_is_error(cpl)) {
262092bf76c9SChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
262192bf76c9SChangpeng Liu 		return;
262292bf76c9SChangpeng Liu 	}
262392bf76c9SChangpeng Liu 
2624a7de7deeSNiklas Cassel 	nvme_ns_set_identify_data(ns);
2625a7de7deeSNiklas Cassel 
262692bf76c9SChangpeng Liu 	/* move on to the next active NS */
262792bf76c9SChangpeng Liu 	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
262892bf76c9SChangpeng Liu 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
262992bf76c9SChangpeng Liu 	if (ns == NULL) {
26302706cd42SChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
26312706cd42SChangpeng Liu 				     ctrlr->opts.admin_timeout_ms);
263292bf76c9SChangpeng Liu 		return;
263392bf76c9SChangpeng Liu 	}
263492bf76c9SChangpeng Liu 	ns->ctrlr = ctrlr;
263592bf76c9SChangpeng Liu 	ns->id = nsid;
263692bf76c9SChangpeng Liu 
263792bf76c9SChangpeng Liu 	rc = nvme_ctrlr_identify_ns_async(ns);
263892bf76c9SChangpeng Liu 	if (rc) {
263992bf76c9SChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
264092bf76c9SChangpeng Liu 	}
264192bf76c9SChangpeng Liu }
264292bf76c9SChangpeng Liu 
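/*
 * The first/next pairing above is the standard idiom for walking active
 * namespaces; applications can iterate the same way:
 *
 *	for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr); nsid != 0;
 *	     nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
 *		ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
 *		...
 *	}
 */
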
264392bf76c9SChangpeng Liu static int
264492bf76c9SChangpeng Liu nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns)
264592bf76c9SChangpeng Liu {
264692bf76c9SChangpeng Liu 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
264792bf76c9SChangpeng Liu 	struct spdk_nvme_ns_data *nsdata;
264892bf76c9SChangpeng Liu 
26493a8852d3SEvgeniy Kochetov 	nsdata = &ns->nsdata;
265092bf76c9SChangpeng Liu 
26512706cd42SChangpeng Liu 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
26522706cd42SChangpeng Liu 			     ctrlr->opts.admin_timeout_ms);
2653d1db0209SNiklas Cassel 	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id, 0,
265492bf76c9SChangpeng Liu 				       nsdata, sizeof(*nsdata),
265592bf76c9SChangpeng Liu 				       nvme_ctrlr_identify_ns_async_done, ns);
265692bf76c9SChangpeng Liu }
265792bf76c9SChangpeng Liu 
2658d9ecb572SChangpeng Liu static int
2659d9ecb572SChangpeng Liu nvme_ctrlr_identify_namespaces(struct spdk_nvme_ctrlr *ctrlr)
2660d9ecb572SChangpeng Liu {
2661d9ecb572SChangpeng Liu 	uint32_t nsid;
2662d9ecb572SChangpeng Liu 	struct spdk_nvme_ns *ns;
2663d9ecb572SChangpeng Liu 	int rc;
2664d9ecb572SChangpeng Liu 
266592bf76c9SChangpeng Liu 	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
2666d9ecb572SChangpeng Liu 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
2667d9ecb572SChangpeng Liu 	if (ns == NULL) {
266892bf76c9SChangpeng Liu 		/* No active NS, move on to the next state */
26692910ba6cSNiklas Cassel 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
26702706cd42SChangpeng Liu 				     ctrlr->opts.admin_timeout_ms);
267192bf76c9SChangpeng Liu 		return 0;
2672d9ecb572SChangpeng Liu 	}
2673d9ecb572SChangpeng Liu 
2674d9ecb572SChangpeng Liu 	ns->ctrlr = ctrlr;
2675d9ecb572SChangpeng Liu 	ns->id = nsid;
2676d9ecb572SChangpeng Liu 
267792bf76c9SChangpeng Liu 	rc = nvme_ctrlr_identify_ns_async(ns);
267892bf76c9SChangpeng Liu 	if (rc) {
267992bf76c9SChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
2680d9ecb572SChangpeng Liu 	}
2681d9ecb572SChangpeng Liu 
268292bf76c9SChangpeng Liu 	return rc;
2683d9ecb572SChangpeng Liu }
2684d9ecb572SChangpeng Liu 
2685c4d1b7d5SNiklas Cassel static int
2686c4d1b7d5SNiklas Cassel nvme_ctrlr_identify_namespaces_iocs_specific_next(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
2687c4d1b7d5SNiklas Cassel {
2688c4d1b7d5SNiklas Cassel 	uint32_t nsid;
2689c4d1b7d5SNiklas Cassel 	struct spdk_nvme_ns *ns;
2690c4d1b7d5SNiklas Cassel 	int rc;
2691c4d1b7d5SNiklas Cassel 
2692c4d1b7d5SNiklas Cassel 	if (!prev_nsid) {
2693c4d1b7d5SNiklas Cassel 		nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
2694c4d1b7d5SNiklas Cassel 	} else {
2695c4d1b7d5SNiklas Cassel 		/* move on to the next active NS */
2696c4d1b7d5SNiklas Cassel 		nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, prev_nsid);
2697c4d1b7d5SNiklas Cassel 	}
2698c4d1b7d5SNiklas Cassel 
2699c4d1b7d5SNiklas Cassel 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
2700c4d1b7d5SNiklas Cassel 	if (ns == NULL) {
27012910ba6cSNiklas Cassel 		/* No first/next active NS, move on to the next state */
27027e68d0baSJim Harris 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
2703c4d1b7d5SNiklas Cassel 				     ctrlr->opts.admin_timeout_ms);
2704c4d1b7d5SNiklas Cassel 		return 0;
2705c4d1b7d5SNiklas Cassel 	}
2706c4d1b7d5SNiklas Cassel 
2707c4d1b7d5SNiklas Cassel 	/* loop until we find a ns which has (supported) iocs specific data */
2708c4d1b7d5SNiklas Cassel 	while (!nvme_ns_has_supported_iocs_specific_data(ns)) {
2709c4d1b7d5SNiklas Cassel 		nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
2710c4d1b7d5SNiklas Cassel 		ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
2711c4d1b7d5SNiklas Cassel 		if (ns == NULL) {
2712c4d1b7d5SNiklas Cassel 			/* no namespace with (supported) iocs specific data found */
27137e68d0baSJim Harris 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
2714c4d1b7d5SNiklas Cassel 					     ctrlr->opts.admin_timeout_ms);
2715c4d1b7d5SNiklas Cassel 			return 0;
2716c4d1b7d5SNiklas Cassel 		}
2717c4d1b7d5SNiklas Cassel 	}
2718c4d1b7d5SNiklas Cassel 
2719c4d1b7d5SNiklas Cassel 	rc = nvme_ctrlr_identify_ns_iocs_specific_async(ns);
2720c4d1b7d5SNiklas Cassel 	if (rc) {
2721c4d1b7d5SNiklas Cassel 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
2722c4d1b7d5SNiklas Cassel 	}
2723c4d1b7d5SNiklas Cassel 
2724c4d1b7d5SNiklas Cassel 	return rc;
2725c4d1b7d5SNiklas Cassel }
2726c4d1b7d5SNiklas Cassel 
2727c4d1b7d5SNiklas Cassel static void
2728c4d1b7d5SNiklas Cassel nvme_ctrlr_identify_ns_zns_specific_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
2729c4d1b7d5SNiklas Cassel {
2730c4d1b7d5SNiklas Cassel 	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
2731c4d1b7d5SNiklas Cassel 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
2732c4d1b7d5SNiklas Cassel 
2733c4d1b7d5SNiklas Cassel 	if (spdk_nvme_cpl_is_error(cpl)) {
2734c4d1b7d5SNiklas Cassel 		nvme_ns_free_zns_specific_data(ns);
2735c4d1b7d5SNiklas Cassel 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
2736c4d1b7d5SNiklas Cassel 		return;
2737c4d1b7d5SNiklas Cassel 	}
2738c4d1b7d5SNiklas Cassel 
2739c4d1b7d5SNiklas Cassel 	nvme_ctrlr_identify_namespaces_iocs_specific_next(ctrlr, ns->id);
2740c4d1b7d5SNiklas Cassel }
2741c4d1b7d5SNiklas Cassel 
2742c4d1b7d5SNiklas Cassel static int
2743457d0f6dSShuhei Matsumoto nvme_ctrlr_identify_ns_zns_specific_async(struct spdk_nvme_ns *ns)
2744c4d1b7d5SNiklas Cassel {
2745c4d1b7d5SNiklas Cassel 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
2746c4d1b7d5SNiklas Cassel 	int rc;
2747c4d1b7d5SNiklas Cassel 
274860057756SEvgeniy Kochetov 	assert(!ns->nsdata_zns);
2749186b109dSJim Harris 	ns->nsdata_zns = spdk_zmalloc(sizeof(*ns->nsdata_zns), 64, NULL, SPDK_ENV_NUMA_ID_ANY,
275060057756SEvgeniy Kochetov 				      SPDK_MALLOC_SHARE);
275160057756SEvgeniy Kochetov 	if (!ns->nsdata_zns) {
2752c4d1b7d5SNiklas Cassel 		return -ENOMEM;
2753c4d1b7d5SNiklas Cassel 	}
2754c4d1b7d5SNiklas Cassel 
2755c4d1b7d5SNiklas Cassel 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,
2756c4d1b7d5SNiklas Cassel 			     ctrlr->opts.admin_timeout_ms);
2757c4d1b7d5SNiklas Cassel 	rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_IOCS, 0, ns->id, ns->csi,
275860057756SEvgeniy Kochetov 				     ns->nsdata_zns, sizeof(*ns->nsdata_zns),
2759c4d1b7d5SNiklas Cassel 				     nvme_ctrlr_identify_ns_zns_specific_async_done, ns);
2760c4d1b7d5SNiklas Cassel 	if (rc) {
2761c4d1b7d5SNiklas Cassel 		nvme_ns_free_zns_specific_data(ns);
2762c4d1b7d5SNiklas Cassel 	}
2763c4d1b7d5SNiklas Cassel 
2764c4d1b7d5SNiklas Cassel 	return rc;
2765c4d1b7d5SNiklas Cassel }
2766c4d1b7d5SNiklas Cassel 
27674629dfb5SShuhei Matsumoto static void
27684629dfb5SShuhei Matsumoto nvme_ctrlr_identify_ns_nvm_specific_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
27694629dfb5SShuhei Matsumoto {
27704629dfb5SShuhei Matsumoto 	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
27714629dfb5SShuhei Matsumoto 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
27724629dfb5SShuhei Matsumoto 
27734629dfb5SShuhei Matsumoto 	if (spdk_nvme_cpl_is_error(cpl)) {
27744629dfb5SShuhei Matsumoto 		nvme_ns_free_nvm_specific_data(ns);
27754629dfb5SShuhei Matsumoto 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
27764629dfb5SShuhei Matsumoto 		return;
27774629dfb5SShuhei Matsumoto 	}
27784629dfb5SShuhei Matsumoto 
27794629dfb5SShuhei Matsumoto 	nvme_ctrlr_identify_namespaces_iocs_specific_next(ctrlr, ns->id);
27804629dfb5SShuhei Matsumoto }
27814629dfb5SShuhei Matsumoto 
27824629dfb5SShuhei Matsumoto static int
27834629dfb5SShuhei Matsumoto nvme_ctrlr_identify_ns_nvm_specific_async(struct spdk_nvme_ns *ns)
27844629dfb5SShuhei Matsumoto {
27854629dfb5SShuhei Matsumoto 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
27864629dfb5SShuhei Matsumoto 	int rc;
27874629dfb5SShuhei Matsumoto 
27884629dfb5SShuhei Matsumoto 	assert(!ns->nsdata_nvm);
2789186b109dSJim Harris 	ns->nsdata_nvm = spdk_zmalloc(sizeof(*ns->nsdata_nvm), 64, NULL, SPDK_ENV_NUMA_ID_ANY,
27904629dfb5SShuhei Matsumoto 				      SPDK_MALLOC_SHARE);
27914629dfb5SShuhei Matsumoto 	if (!ns->nsdata_nvm) {
27924629dfb5SShuhei Matsumoto 		return -ENOMEM;
27934629dfb5SShuhei Matsumoto 	}
27944629dfb5SShuhei Matsumoto 
27954629dfb5SShuhei Matsumoto 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,
27964629dfb5SShuhei Matsumoto 			     ctrlr->opts.admin_timeout_ms);
27974629dfb5SShuhei Matsumoto 	rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_IOCS, 0, ns->id, ns->csi,
27984629dfb5SShuhei Matsumoto 				     ns->nsdata_nvm, sizeof(*ns->nsdata_nvm),
27994629dfb5SShuhei Matsumoto 				     nvme_ctrlr_identify_ns_nvm_specific_async_done, ns);
28004629dfb5SShuhei Matsumoto 	if (rc) {
28014629dfb5SShuhei Matsumoto 		nvme_ns_free_nvm_specific_data(ns);
28024629dfb5SShuhei Matsumoto 	}
28034629dfb5SShuhei Matsumoto 
28044629dfb5SShuhei Matsumoto 	return rc;
28054629dfb5SShuhei Matsumoto }
28064629dfb5SShuhei Matsumoto 
2807c4d1b7d5SNiklas Cassel static int
2808457d0f6dSShuhei Matsumoto nvme_ctrlr_identify_ns_iocs_specific_async(struct spdk_nvme_ns *ns)
2809457d0f6dSShuhei Matsumoto {
2810457d0f6dSShuhei Matsumoto 	switch (ns->csi) {
2811457d0f6dSShuhei Matsumoto 	case SPDK_NVME_CSI_ZNS:
2812457d0f6dSShuhei Matsumoto 		return nvme_ctrlr_identify_ns_zns_specific_async(ns);
28134629dfb5SShuhei Matsumoto 	case SPDK_NVME_CSI_NVM:
28144629dfb5SShuhei Matsumoto 		if (ns->ctrlr->cdata.ctratt.bits.elbas) {
28154629dfb5SShuhei Matsumoto 			return nvme_ctrlr_identify_ns_nvm_specific_async(ns);
28164629dfb5SShuhei Matsumoto 		}
28174629dfb5SShuhei Matsumoto 	/* fallthrough */
2818457d0f6dSShuhei Matsumoto 	default:
2819457d0f6dSShuhei Matsumoto 		/*
2820457d0f6dSShuhei Matsumoto 		 * This switch must handle all cases for which
2821457d0f6dSShuhei Matsumoto 		 * nvme_ns_has_supported_iocs_specific_data() returns true,
2822457d0f6dSShuhei Matsumoto 		 * other cases should never happen.
2823457d0f6dSShuhei Matsumoto 		 */
2824457d0f6dSShuhei Matsumoto 		assert(0);
2825457d0f6dSShuhei Matsumoto 	}
2826457d0f6dSShuhei Matsumoto 
2827457d0f6dSShuhei Matsumoto 	return -EINVAL;
2828457d0f6dSShuhei Matsumoto }
2829457d0f6dSShuhei Matsumoto 
2830457d0f6dSShuhei Matsumoto static int
2831c4d1b7d5SNiklas Cassel nvme_ctrlr_identify_namespaces_iocs_specific(struct spdk_nvme_ctrlr *ctrlr)
2832c4d1b7d5SNiklas Cassel {
2833c4d1b7d5SNiklas Cassel 	if (!nvme_ctrlr_multi_iocs_enabled(ctrlr)) {
28342910ba6cSNiklas Cassel 		/* Multi IOCS not supported/enabled, move on to the next state */
28357e68d0baSJim Harris 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
2836c4d1b7d5SNiklas Cassel 				     ctrlr->opts.admin_timeout_ms);
2837c4d1b7d5SNiklas Cassel 		return 0;
2838c4d1b7d5SNiklas Cassel 	}
2839c4d1b7d5SNiklas Cassel 
2840c4d1b7d5SNiklas Cassel 	return nvme_ctrlr_identify_namespaces_iocs_specific_next(ctrlr, 0);
2841c4d1b7d5SNiklas Cassel }
2842c4d1b7d5SNiklas Cassel 
2843a2fdc4ddSChangpeng Liu static void
2844a2fdc4ddSChangpeng Liu nvme_ctrlr_identify_id_desc_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
2845a2fdc4ddSChangpeng Liu {
2846a2fdc4ddSChangpeng Liu 	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
2847a2fdc4ddSChangpeng Liu 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
2848a2fdc4ddSChangpeng Liu 	uint32_t nsid;
2849a2fdc4ddSChangpeng Liu 	int rc;
2850a2fdc4ddSChangpeng Liu 
2851a2fdc4ddSChangpeng Liu 	if (spdk_nvme_cpl_is_error(cpl)) {
28522910ba6cSNiklas Cassel 		/*
28532910ba6cSNiklas Cassel 		 * Many controllers claim to be compatible with NVMe 1.3, however,
28542910ba6cSNiklas Cassel 		 * they do not implement NS ID Desc List. Therefore, instead of setting
28552910ba6cSNiklas Cassel 		 * the state to NVME_CTRLR_STATE_ERROR, silently ignore the completion
28562910ba6cSNiklas Cassel 		 * error and move on to the next state.
28572910ba6cSNiklas Cassel 		 *
28582910ba6cSNiklas Cassel 		 * The proper way is to create a new quirk for controllers that violate
28592910ba6cSNiklas Cassel 		 * the NVMe 1.3 spec by not supporting NS ID Desc List.
28602910ba6cSNiklas Cassel 		 * (Re-using the NVME_QUIRK_IDENTIFY_CNS quirk is not possible, since
28612910ba6cSNiklas Cassel 		 * it is too generic and was added in order to handle controllers that
28622910ba6cSNiklas Cassel 		 * violate the NVMe 1.1 spec by not supporting ACTIVE LIST).
28632910ba6cSNiklas Cassel 		 */
28642910ba6cSNiklas Cassel 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
28652706cd42SChangpeng Liu 				     ctrlr->opts.admin_timeout_ms);
2866a2fdc4ddSChangpeng Liu 		return;
2867a2fdc4ddSChangpeng Liu 	}
2868a2fdc4ddSChangpeng Liu 
2869743271d9SNiklas Cassel 	nvme_ns_set_id_desc_list_data(ns);
2870743271d9SNiklas Cassel 
2871a2fdc4ddSChangpeng Liu 	/* move on to the next active NS */
2872a2fdc4ddSChangpeng Liu 	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
2873a2fdc4ddSChangpeng Liu 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
2874a2fdc4ddSChangpeng Liu 	if (ns == NULL) {
2875c4d1b7d5SNiklas Cassel 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
28762706cd42SChangpeng Liu 				     ctrlr->opts.admin_timeout_ms);
2877a2fdc4ddSChangpeng Liu 		return;
2878a2fdc4ddSChangpeng Liu 	}
2879a2fdc4ddSChangpeng Liu 
2880a2fdc4ddSChangpeng Liu 	rc = nvme_ctrlr_identify_id_desc_async(ns);
2881a2fdc4ddSChangpeng Liu 	if (rc) {
2882a2fdc4ddSChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
2883a2fdc4ddSChangpeng Liu 	}
2884a2fdc4ddSChangpeng Liu }
2885a2fdc4ddSChangpeng Liu 
2886a2fdc4ddSChangpeng Liu static int
2887a2fdc4ddSChangpeng Liu nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns)
2888a2fdc4ddSChangpeng Liu {
2889a2fdc4ddSChangpeng Liu 	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
2890a2fdc4ddSChangpeng Liu 
2891a2fdc4ddSChangpeng Liu 	memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
2892a2fdc4ddSChangpeng Liu 
28932706cd42SChangpeng Liu 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
28942706cd42SChangpeng Liu 			     ctrlr->opts.admin_timeout_ms);
2895a2fdc4ddSChangpeng Liu 	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST,
2896d1db0209SNiklas Cassel 				       0, ns->id, 0, ns->id_desc_list, sizeof(ns->id_desc_list),
2897a2fdc4ddSChangpeng Liu 				       nvme_ctrlr_identify_id_desc_async_done, ns);
2898a2fdc4ddSChangpeng Liu }
2899a2fdc4ddSChangpeng Liu 
2900d9ecb572SChangpeng Liu static int
2901d9ecb572SChangpeng Liu nvme_ctrlr_identify_id_desc_namespaces(struct spdk_nvme_ctrlr *ctrlr)
2902d9ecb572SChangpeng Liu {
2903d9ecb572SChangpeng Liu 	uint32_t nsid;
2904d9ecb572SChangpeng Liu 	struct spdk_nvme_ns *ns;
2905d9ecb572SChangpeng Liu 	int rc;
2906d9ecb572SChangpeng Liu 
2907174a5fe1SNiklas Cassel 	if ((ctrlr->vs.raw < SPDK_NVME_VERSION(1, 3, 0) &&
2908174a5fe1SNiklas Cassel 	     !(ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS)) ||
2909a2fdc4ddSChangpeng Liu 	    (ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
291001f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "Version < 1.3 without I/O command set support, or quirk set; not attempting to retrieve NS ID Descriptor List\n");
29112910ba6cSNiklas Cassel 		/* NS ID Desc List not supported, move on to the next state */
29122910ba6cSNiklas Cassel 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
29132706cd42SChangpeng Liu 				     ctrlr->opts.admin_timeout_ms);
2914a2fdc4ddSChangpeng Liu 		return 0;
2915d9ecb572SChangpeng Liu 	}
2916d9ecb572SChangpeng Liu 
2917a2fdc4ddSChangpeng Liu 	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
2918a2fdc4ddSChangpeng Liu 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
2919a2fdc4ddSChangpeng Liu 	if (ns == NULL) {
2920a2fdc4ddSChangpeng Liu 		/* No active NS, move on to the next state */
29212910ba6cSNiklas Cassel 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
29222706cd42SChangpeng Liu 				     ctrlr->opts.admin_timeout_ms);
2923d9ecb572SChangpeng Liu 		return 0;
2924d9ecb572SChangpeng Liu 	}
2925d9ecb572SChangpeng Liu 
2926a2fdc4ddSChangpeng Liu 	rc = nvme_ctrlr_identify_id_desc_async(ns);
2927a2fdc4ddSChangpeng Liu 	if (rc) {
2928a2fdc4ddSChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
2929a2fdc4ddSChangpeng Liu 	}
2930a2fdc4ddSChangpeng Liu 
2931a2fdc4ddSChangpeng Liu 	return rc;
2932a2fdc4ddSChangpeng Liu }
2933a2fdc4ddSChangpeng Liu 
293438a396d9SChangpeng Liu static void
2935c3ba9127SAlexey Marchuk nvme_ctrlr_update_nvmf_ioccsz(struct spdk_nvme_ctrlr *ctrlr)
2936c3ba9127SAlexey Marchuk {
2937227d83e2SShuhei Matsumoto 	if (spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
2938c3ba9127SAlexey Marchuk 		if (ctrlr->cdata.nvmf_specific.ioccsz < 4) {
293901f45ecdSGangCao 			NVME_CTRLR_ERRLOG(ctrlr, "Incorrect IOCCSZ %u, the minimum value should be 4\n",
2940c3ba9127SAlexey Marchuk 					  ctrlr->cdata.nvmf_specific.ioccsz);
2941c3ba9127SAlexey Marchuk 			ctrlr->cdata.nvmf_specific.ioccsz = 4;
2942c3ba9127SAlexey Marchuk 			assert(0);
2943c3ba9127SAlexey Marchuk 		}
2944c3ba9127SAlexey Marchuk 		ctrlr->ioccsz_bytes = ctrlr->cdata.nvmf_specific.ioccsz * 16 - sizeof(struct spdk_nvme_cmd);
2945c3ba9127SAlexey Marchuk 		ctrlr->icdoff = ctrlr->cdata.nvmf_specific.icdoff;
2946c3ba9127SAlexey Marchuk 	}
2947c3ba9127SAlexey Marchuk }
2948c3ba9127SAlexey Marchuk 
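/*
 * Worked example for the IOCCSZ math above: IOCCSZ is in 16-byte units and
 * includes the 64-byte SQE, so IOCCSZ = 4 leaves room for no in-capsule data
 * (4 * 16 - 64 = 0 bytes), while IOCCSZ = 260 leaves 4 KiB
 * (260 * 16 - 64 = 4096 bytes).
 */
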
294938a396d9SChangpeng Liu static void
29502ca71169SEvgeniy Kochetov nvme_ctrlr_set_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
29518b95dbabSChangpeng Liu {
29528b95dbabSChangpeng Liu 	uint32_t cq_allocated, sq_allocated, min_allocated, i;
295338a396d9SChangpeng Liu 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
29548b95dbabSChangpeng Liu 
295538a396d9SChangpeng Liu 	if (spdk_nvme_cpl_is_error(cpl)) {
295601f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Set Features - Number of Queues failed!\n");
2957c9ef7642SDaniel Verkamp 		ctrlr->opts.num_io_queues = 0;
2958c9ef7642SDaniel Verkamp 	} else {
29591010fb3aSDaniel Verkamp 		/*
29601010fb3aSDaniel Verkamp 		 * Data in cdw0 is 0-based.
29611010fb3aSDaniel Verkamp 		 * Lower 16-bits indicate number of submission queues allocated.
29621010fb3aSDaniel Verkamp 		 * Upper 16-bits indicate number of completion queues allocated.
29631010fb3aSDaniel Verkamp 		 */
296438a396d9SChangpeng Liu 		sq_allocated = (cpl->cdw0 & 0xFFFF) + 1;
296538a396d9SChangpeng Liu 		cq_allocated = (cpl->cdw0 >> 16) + 1;
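		/*
		 * Worked example: cdw0 = 0x00030003 reports 4 submission and
		 * 4 completion queues allocated, since both fields are 0-based.
		 */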
29661010fb3aSDaniel Verkamp 
2967305088e5SKonstantin Vyshetsky 		/*
2968305088e5SKonstantin Vyshetsky 		 * For 1:1 queue mapping, set number of allocated queues to be minimum of
2969305088e5SKonstantin Vyshetsky 		 * submission and completion queues.
2970305088e5SKonstantin Vyshetsky 		 */
2971305088e5SKonstantin Vyshetsky 		min_allocated = spdk_min(sq_allocated, cq_allocated);
2972305088e5SKonstantin Vyshetsky 
2973305088e5SKonstantin Vyshetsky 		/* Set number of queues to be minimum of requested and actually allocated. */
2974305088e5SKonstantin Vyshetsky 		ctrlr->opts.num_io_queues = spdk_min(min_allocated, ctrlr->opts.num_io_queues);
29753ab7a1f6SAnkit Kumar 
29763ab7a1f6SAnkit Kumar 		if (ctrlr->opts.enable_interrupts) {
29773ab7a1f6SAnkit Kumar 			ctrlr->opts.num_io_queues = spdk_min(MAX_IO_QUEUES_WITH_INTERRUPTS,
29783ab7a1f6SAnkit Kumar 							     ctrlr->opts.num_io_queues);
29793ab7a1f6SAnkit Kumar 			if (nvme_transport_ctrlr_enable_interrupts(ctrlr) < 0) {
29803ab7a1f6SAnkit Kumar 				NVME_CTRLR_ERRLOG(ctrlr, "Failed to enable interrupts!\n");
29813ab7a1f6SAnkit Kumar 				ctrlr->opts.enable_interrupts = false;
29823ab7a1f6SAnkit Kumar 			}
29833ab7a1f6SAnkit Kumar 		}
2984c9ef7642SDaniel Verkamp 	}
29851010fb3aSDaniel Verkamp 
2986c194ebd8SDaniel Verkamp 	ctrlr->free_io_qids = spdk_bit_array_create(ctrlr->opts.num_io_queues + 1);
2987c194ebd8SDaniel Verkamp 	if (ctrlr->free_io_qids == NULL) {
298838a396d9SChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
298938a396d9SChangpeng Liu 		return;
2990c194ebd8SDaniel Verkamp 	}
2991c194ebd8SDaniel Verkamp 
29924e06de69SJacek Kalwas 	/* Initialize list of free I/O queue IDs. QID 0 is the admin queue (implicitly allocated). */
2993c194ebd8SDaniel Verkamp 	for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
29944e06de69SJacek Kalwas 		spdk_nvme_ctrlr_free_qid(ctrlr, i);
2995c194ebd8SDaniel Verkamp 	}
29964e06de69SJacek Kalwas 
299765ff0771SEvgeniy Kochetov 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
29982706cd42SChangpeng Liu 			     ctrlr->opts.admin_timeout_ms);
299938a396d9SChangpeng Liu }
300038a396d9SChangpeng Liu 
300138a396d9SChangpeng Liu static int
30022ca71169SEvgeniy Kochetov nvme_ctrlr_set_num_queues(struct spdk_nvme_ctrlr *ctrlr)
300338a396d9SChangpeng Liu {
300438a396d9SChangpeng Liu 	int rc;
300538a396d9SChangpeng Liu 
30062ca71169SEvgeniy Kochetov 	if (ctrlr->opts.num_io_queues > SPDK_NVME_MAX_IO_QUEUES) {
300701f45ecdSGangCao 		NVME_CTRLR_NOTICELOG(ctrlr, "Limiting requested num_io_queues %u to max %d\n",
30082ca71169SEvgeniy Kochetov 				     ctrlr->opts.num_io_queues, SPDK_NVME_MAX_IO_QUEUES);
30092ca71169SEvgeniy Kochetov 		ctrlr->opts.num_io_queues = SPDK_NVME_MAX_IO_QUEUES;
30102ca71169SEvgeniy Kochetov 	} else if (ctrlr->opts.num_io_queues < 1) {
301101f45ecdSGangCao 		NVME_CTRLR_NOTICELOG(ctrlr, "Requested num_io_queues 0, increasing to 1\n");
30122ca71169SEvgeniy Kochetov 		ctrlr->opts.num_io_queues = 1;
30132ca71169SEvgeniy Kochetov 	}
30142ca71169SEvgeniy Kochetov 
30152ca71169SEvgeniy Kochetov 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
30162706cd42SChangpeng Liu 			     ctrlr->opts.admin_timeout_ms);
301738a396d9SChangpeng Liu 
30182ca71169SEvgeniy Kochetov 	rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->opts.num_io_queues,
30192ca71169SEvgeniy Kochetov 					   nvme_ctrlr_set_num_queues_done, ctrlr);
302038a396d9SChangpeng Liu 	if (rc != 0) {
302138a396d9SChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
302238a396d9SChangpeng Liu 		return rc;
302338a396d9SChangpeng Liu 	}
3024c194ebd8SDaniel Verkamp 
30251010fb3aSDaniel Verkamp 	return 0;
30261010fb3aSDaniel Verkamp }
30271010fb3aSDaniel Verkamp 
30288db29979SChangpeng Liu static void
30298db29979SChangpeng Liu nvme_ctrlr_set_keep_alive_timeout_done(void *arg, const struct spdk_nvme_cpl *cpl)
30308db29979SChangpeng Liu {
3031ba773a54SJin Yu 	uint32_t keep_alive_interval_us;
30328db29979SChangpeng Liu 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
30338db29979SChangpeng Liu 
3034956bb46cSChangpeng Liu 	if (spdk_nvme_cpl_is_error(cpl)) {
30354b4b3ccaSEvgeniy Kochetov 		if ((cpl->status.sct == SPDK_NVME_SCT_GENERIC) &&
30364b4b3ccaSEvgeniy Kochetov 		    (cpl->status.sc == SPDK_NVME_SC_INVALID_FIELD)) {
303701f45ecdSGangCao 			NVME_CTRLR_DEBUGLOG(ctrlr, "Keep alive timeout Get Feature is not supported\n");
30384b4b3ccaSEvgeniy Kochetov 		} else {
303901f45ecdSGangCao 			NVME_CTRLR_ERRLOG(ctrlr, "Keep alive timeout Get Feature failed: SC %x SCT %x\n",
3040956bb46cSChangpeng Liu 					  cpl->status.sc, cpl->status.sct);
3041956bb46cSChangpeng Liu 			ctrlr->opts.keep_alive_timeout_ms = 0;
3042956bb46cSChangpeng Liu 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
3043956bb46cSChangpeng Liu 			return;
3044956bb46cSChangpeng Liu 		}
30454b4b3ccaSEvgeniy Kochetov 	} else {
30468db29979SChangpeng Liu 		if (ctrlr->opts.keep_alive_timeout_ms != cpl->cdw0) {
304701f45ecdSGangCao 			NVME_CTRLR_DEBUGLOG(ctrlr, "Controller adjusted keep alive timeout to %u ms\n",
30488db29979SChangpeng Liu 					    cpl->cdw0);
30498db29979SChangpeng Liu 		}
30508db29979SChangpeng Liu 
30518db29979SChangpeng Liu 		ctrlr->opts.keep_alive_timeout_ms = cpl->cdw0;
30524b4b3ccaSEvgeniy Kochetov 	}
30538db29979SChangpeng Liu 
3054ba773a54SJin Yu 	if (ctrlr->opts.keep_alive_timeout_ms == 0) {
3055ba773a54SJin Yu 		ctrlr->keep_alive_interval_ticks = 0;
3056ba773a54SJin Yu 	} else {
3057ba773a54SJin Yu 		keep_alive_interval_us = ctrlr->opts.keep_alive_timeout_ms * 1000 / 2;
30588db29979SChangpeng Liu 
305901f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "Sending keep alive every %u us\n", keep_alive_interval_us);
3060ba773a54SJin Yu 
3061ba773a54SJin Yu 		ctrlr->keep_alive_interval_ticks = (keep_alive_interval_us * spdk_get_ticks_hz()) /
3062ba773a54SJin Yu 						   UINT64_C(1000000);
30638db29979SChangpeng Liu 
30648db29979SChangpeng Liu 		/* Schedule the first Keep Alive to be sent as soon as possible. */
30658db29979SChangpeng Liu 		ctrlr->next_keep_alive_tick = spdk_get_ticks();
3066ba773a54SJin Yu 	}
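	/*
	 * Worked example: keep_alive_timeout_ms = 10000 results in a Keep
	 * Alive command roughly every 5 seconds, i.e. at half the negotiated
	 * timeout, leaving headroom so a single delayed command does not let
	 * the timer expire.
	 */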
3067ba773a54SJin Yu 
30681c083e62SJim Harris 	if (spdk_nvme_ctrlr_is_discovery(ctrlr)) {
30691c083e62SJim Harris 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
30701c083e62SJim Harris 	} else {
30711c083e62SJim Harris 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
30722706cd42SChangpeng Liu 				     ctrlr->opts.admin_timeout_ms);
30738db29979SChangpeng Liu 	}
30741c083e62SJim Harris }
30758db29979SChangpeng Liu 
307693de96b4SDaniel Verkamp static int
307793de96b4SDaniel Verkamp nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
307893de96b4SDaniel Verkamp {
307993de96b4SDaniel Verkamp 	int rc;
308093de96b4SDaniel Verkamp 
308193de96b4SDaniel Verkamp 	if (ctrlr->opts.keep_alive_timeout_ms == 0) {
30821c083e62SJim Harris 		if (spdk_nvme_ctrlr_is_discovery(ctrlr)) {
30831c083e62SJim Harris 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
30841c083e62SJim Harris 		} else {
30851c083e62SJim Harris 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
30862706cd42SChangpeng Liu 					     ctrlr->opts.admin_timeout_ms);
30871c083e62SJim Harris 		}
308893de96b4SDaniel Verkamp 		return 0;
308993de96b4SDaniel Verkamp 	}
309093de96b4SDaniel Verkamp 
30911c083e62SJim Harris 	/* Note: Discovery controller identify data does not populate KAS according to spec. */
30921c083e62SJim Harris 	if (!spdk_nvme_ctrlr_is_discovery(ctrlr) && ctrlr->cdata.kas == 0) {
309301f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "Controller KAS is 0 - not enabling Keep Alive\n");
309493de96b4SDaniel Verkamp 		ctrlr->opts.keep_alive_timeout_ms = 0;
30951c083e62SJim Harris 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
30962706cd42SChangpeng Liu 				     ctrlr->opts.admin_timeout_ms);
309793de96b4SDaniel Verkamp 		return 0;
309893de96b4SDaniel Verkamp 	}
309993de96b4SDaniel Verkamp 
31002706cd42SChangpeng Liu 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
31012706cd42SChangpeng Liu 			     ctrlr->opts.admin_timeout_ms);
3102956bb46cSChangpeng Liu 
310393de96b4SDaniel Verkamp 	/* Retrieve actual keep alive timeout, since the controller may have adjusted it. */
310493de96b4SDaniel Verkamp 	rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0,
3105956bb46cSChangpeng Liu 					     nvme_ctrlr_set_keep_alive_timeout_done, ctrlr);
310693de96b4SDaniel Verkamp 	if (rc != 0) {
310701f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Keep alive timeout Get Feature failed: %d\n", rc);
310893de96b4SDaniel Verkamp 		ctrlr->opts.keep_alive_timeout_ms = 0;
3109956bb46cSChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
311093de96b4SDaniel Verkamp 		return rc;
311193de96b4SDaniel Verkamp 	}
311293de96b4SDaniel Verkamp 
311393de96b4SDaniel Verkamp 	return 0;
311493de96b4SDaniel Verkamp }
311593de96b4SDaniel Verkamp 
311698b19709SChangpeng Liu static void
311798b19709SChangpeng Liu nvme_ctrlr_set_host_id_done(void *arg, const struct spdk_nvme_cpl *cpl)
311898b19709SChangpeng Liu {
311998b19709SChangpeng Liu 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
312098b19709SChangpeng Liu 
312198b19709SChangpeng Liu 	if (spdk_nvme_cpl_is_error(cpl)) {
312298b19709SChangpeng Liu 		/*
312398b19709SChangpeng Liu 		 * Treat Set Features - Host ID failure as non-fatal, since the Host ID feature
312498b19709SChangpeng Liu 		 * is optional.
312598b19709SChangpeng Liu 		 */
312601f45ecdSGangCao 		NVME_CTRLR_WARNLOG(ctrlr, "Set Features - Host ID failed: SC 0x%x SCT 0x%x\n",
312798b19709SChangpeng Liu 				   cpl->status.sc, cpl->status.sct);
312898b19709SChangpeng Liu 	} else {
312901f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "Set Features - Host ID was successful\n");
313098b19709SChangpeng Liu 	}
313198b19709SChangpeng Liu 
31323dd0bc9eSEvgeniy Kochetov 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_TRANSPORT_READY, ctrlr->opts.admin_timeout_ms);
313398b19709SChangpeng Liu }
313498b19709SChangpeng Liu 
31358acc4ae5SDaniel Verkamp static int
31368acc4ae5SDaniel Verkamp nvme_ctrlr_set_host_id(struct spdk_nvme_ctrlr *ctrlr)
31378acc4ae5SDaniel Verkamp {
31388acc4ae5SDaniel Verkamp 	uint8_t *host_id;
31398acc4ae5SDaniel Verkamp 	uint32_t host_id_size;
31408acc4ae5SDaniel Verkamp 	int rc;
31418acc4ae5SDaniel Verkamp 
31428acc4ae5SDaniel Verkamp 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
31438acc4ae5SDaniel Verkamp 		/*
31448acc4ae5SDaniel Verkamp 		 * NVMe-oF sends the host ID during Connect and doesn't allow
31458acc4ae5SDaniel Verkamp 		 * Set Features - Host Identifier after Connect, so we don't need to do anything here.
31468acc4ae5SDaniel Verkamp 		 */
314701f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "NVMe-oF transport - not sending Set Features - Host ID\n");
31483dd0bc9eSEvgeniy Kochetov 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_TRANSPORT_READY, ctrlr->opts.admin_timeout_ms);
31498acc4ae5SDaniel Verkamp 		return 0;
31508acc4ae5SDaniel Verkamp 	}
31518acc4ae5SDaniel Verkamp 
3152b4d406b7SAnkit Kumar 	if (ctrlr->cdata.ctratt.bits.host_id_exhid_supported) {
315301f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "Using 128-bit extended host identifier\n");
31548acc4ae5SDaniel Verkamp 		host_id = ctrlr->opts.extended_host_id;
31558acc4ae5SDaniel Verkamp 		host_id_size = sizeof(ctrlr->opts.extended_host_id);
31568acc4ae5SDaniel Verkamp 	} else {
315701f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "Using 64-bit host identifier\n");
31588acc4ae5SDaniel Verkamp 		host_id = ctrlr->opts.host_id;
31598acc4ae5SDaniel Verkamp 		host_id_size = sizeof(ctrlr->opts.host_id);
31608acc4ae5SDaniel Verkamp 	}
31618acc4ae5SDaniel Verkamp 
31628acc4ae5SDaniel Verkamp 	/* If the user specified an all-zeroes host identifier, don't send the command. */
3163d81f3dfdSDaniel Verkamp 	if (spdk_mem_all_zero(host_id, host_id_size)) {
316401f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "User did not specify host ID - not sending Set Features - Host ID\n");
31653dd0bc9eSEvgeniy Kochetov 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_TRANSPORT_READY, ctrlr->opts.admin_timeout_ms);
31668acc4ae5SDaniel Verkamp 		return 0;
31678acc4ae5SDaniel Verkamp 	}
31688acc4ae5SDaniel Verkamp 
31692172c432STomasz Zawadzki 	SPDK_LOGDUMP(nvme, "host_id", host_id, host_id_size);
31708acc4ae5SDaniel Verkamp 
31712706cd42SChangpeng Liu 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
31722706cd42SChangpeng Liu 			     ctrlr->opts.admin_timeout_ms);
317398b19709SChangpeng Liu 
317498b19709SChangpeng Liu 	rc = nvme_ctrlr_cmd_set_host_id(ctrlr, host_id, host_id_size, nvme_ctrlr_set_host_id_done, ctrlr);
31758acc4ae5SDaniel Verkamp 	if (rc != 0) {
317601f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Set Features - Host ID failed: %d\n", rc);
317798b19709SChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
31788acc4ae5SDaniel Verkamp 		return rc;
31798acc4ae5SDaniel Verkamp 	}
31808acc4ae5SDaniel Verkamp 
31818acc4ae5SDaniel Verkamp 	return 0;
31828acc4ae5SDaniel Verkamp }
31838acc4ae5SDaniel Verkamp 
3184522faef3SCurt Bruns void
3185be1c82b7SChangpeng Liu nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr)
31861010fb3aSDaniel Verkamp {
3187b4dace73SBen Walker 	uint32_t nsid;
3188b4dace73SBen Walker 	struct spdk_nvme_ns *ns;
3189be1c82b7SChangpeng Liu 
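	/* (Re)construct each currently active namespace to refresh its data. */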
3190b4dace73SBen Walker 	for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
3191b4dace73SBen Walker 	     nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
3192b4dace73SBen Walker 		ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
3193b4dace73SBen Walker 		nvme_ns_construct(ns, nsid, ctrlr);
3194be1c82b7SChangpeng Liu 	}
3195be1c82b7SChangpeng Liu }
3196be1c82b7SChangpeng Liu 
3197be1c82b7SChangpeng Liu static int
31984f4f505cStyler_sun nvme_ctrlr_clear_changed_ns_log(struct spdk_nvme_ctrlr *ctrlr)
31994f4f505cStyler_sun {
32004f4f505cStyler_sun 	struct nvme_completion_poll_status	*status;
32014f4f505cStyler_sun 	int		rc = -ENOMEM;
32024f4f505cStyler_sun 	char		*buffer = NULL;
32034f4f505cStyler_sun 	uint32_t	nsid;
32044f4f505cStyler_sun 	size_t		buf_size = (SPDK_NVME_MAX_CHANGED_NAMESPACES * sizeof(uint32_t));
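	/* The Changed Namespace List log page is an array of 32-bit NSIDs. */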
32054f4f505cStyler_sun 
3206b801af09SJim Harris 	if (ctrlr->opts.disable_read_changed_ns_list_log_page) {
3207b801af09SJim Harris 		return 0;
3208b801af09SJim Harris 	}
3209b801af09SJim Harris 
32104f4f505cStyler_sun 	buffer = spdk_dma_zmalloc(buf_size, 4096, NULL);
32114f4f505cStyler_sun 	if (!buffer) {
32124f4f505cStyler_sun 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate buffer for getting "
32134f4f505cStyler_sun 				  "changed ns log.\n");
32144f4f505cStyler_sun 		return rc;
32154f4f505cStyler_sun 	}
32164f4f505cStyler_sun 
32174f4f505cStyler_sun 	status = calloc(1, sizeof(*status));
32184f4f505cStyler_sun 	if (!status) {
32194f4f505cStyler_sun 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
32204f4f505cStyler_sun 		goto free_buffer;
32214f4f505cStyler_sun 	}
32224f4f505cStyler_sun 
32234f4f505cStyler_sun 	rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr,
32244f4f505cStyler_sun 					      SPDK_NVME_LOG_CHANGED_NS_LIST,
32254f4f505cStyler_sun 					      SPDK_NVME_GLOBAL_NS_TAG,
32264f4f505cStyler_sun 					      buffer, buf_size, 0,
32274f4f505cStyler_sun 					      nvme_completion_poll_cb, status);
32284f4f505cStyler_sun 
32294f4f505cStyler_sun 	if (rc) {
32304f4f505cStyler_sun 		NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_cmd_get_log_page() failed: rc=%d\n", rc);
32314f4f505cStyler_sun 		free(status);
32324f4f505cStyler_sun 		goto free_buffer;
32334f4f505cStyler_sun 	}
32344f4f505cStyler_sun 
32354f4f505cStyler_sun 	rc = nvme_wait_for_completion_timeout(ctrlr->adminq, status,
32364f4f505cStyler_sun 					      ctrlr->opts.admin_timeout_ms * 1000);
32374f4f505cStyler_sun 	if (!status->timed_out) {
32384f4f505cStyler_sun 		free(status);
32394f4f505cStyler_sun 	}
32404f4f505cStyler_sun 
32414f4f505cStyler_sun 	if (rc) {
32424f4f505cStyler_sun 		NVME_CTRLR_ERRLOG(ctrlr, "wait for spdk_nvme_ctrlr_cmd_get_log_page failed: rc=%d\n", rc);
32434f4f505cStyler_sun 		goto free_buffer;
32444f4f505cStyler_sun 	}
32454f4f505cStyler_sun 
32464f4f505cStyler_sun 	/* Only check for overflow: the first entry is 0xffffffff when more namespaces changed than the log can hold. */
32474f4f505cStyler_sun 	nsid = from_le32(buffer);
32484f4f505cStyler_sun 	if (nsid == 0xffffffffu) {
32494f4f505cStyler_sun 		NVME_CTRLR_WARNLOG(ctrlr, "changed ns log overflowed.\n");
32504f4f505cStyler_sun 	}
32514f4f505cStyler_sun 
32524f4f505cStyler_sun free_buffer:
32534f4f505cStyler_sun 	spdk_dma_free(buffer);
32544f4f505cStyler_sun 	return rc;
32554f4f505cStyler_sun }
32564f4f505cStyler_sun 
32572f57399eSJim Harris static void
3258391d89b8SG.Balaji nvme_ctrlr_process_async_event(struct spdk_nvme_ctrlr *ctrlr,
3259391d89b8SG.Balaji 			       const struct spdk_nvme_cpl *cpl)
3260391d89b8SG.Balaji {
3261391d89b8SG.Balaji 	union spdk_nvme_async_event_completion event;
3262391d89b8SG.Balaji 	struct spdk_nvme_ctrlr_process *active_proc;
3263391d89b8SG.Balaji 	int rc;
3264391d89b8SG.Balaji 
3265391d89b8SG.Balaji 	event.raw = cpl->cdw0;
3266391d89b8SG.Balaji 
3267391d89b8SG.Balaji 	if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
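		/*
		 * Reading the Changed Namespace List log page clears the AEN
		 * condition, allowing the controller to raise subsequent
		 * namespace attribute notices.
		 */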
3268391d89b8SG.Balaji 	    (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED)) {
3269a119799bSChangpeng Liu 		nvme_ctrlr_clear_changed_ns_log(ctrlr);
3270a119799bSChangpeng Liu 
3271a119799bSChangpeng Liu 		rc = nvme_ctrlr_identify_active_ns(ctrlr);
3272a119799bSChangpeng Liu 		if (rc) {
3273a119799bSChangpeng Liu 			return;
3274a119799bSChangpeng Liu 		}
3275a119799bSChangpeng Liu 		nvme_ctrlr_update_namespaces(ctrlr);
3276a119799bSChangpeng Liu 		nvme_io_msg_ctrlr_update(ctrlr);
3277391d89b8SG.Balaji 	}
3278391d89b8SG.Balaji 
3279391d89b8SG.Balaji 	if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
3280391d89b8SG.Balaji 	    (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE)) {
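		/*
		 * On an ANA change notice, re-read the ANA log page and refresh
		 * the ANA state of each namespace, unless the user has disabled
		 * ANA log page reads.
		 */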
3281e0715c2aSShuhei Matsumoto 		if (!ctrlr->opts.disable_read_ana_log_page) {
3282391d89b8SG.Balaji 			rc = nvme_ctrlr_update_ana_log_page(ctrlr);
3283391d89b8SG.Balaji 			if (rc) {
3284391d89b8SG.Balaji 				return;
3285391d89b8SG.Balaji 			}
3286e0715c2aSShuhei Matsumoto 			nvme_ctrlr_parse_ana_log_page(ctrlr, nvme_ctrlr_update_ns_ana_states,
3287e0715c2aSShuhei Matsumoto 						      ctrlr);
3288e0715c2aSShuhei Matsumoto 		}
3289391d89b8SG.Balaji 	}
3290391d89b8SG.Balaji 
3291391d89b8SG.Balaji 	active_proc = nvme_ctrlr_get_current_process(ctrlr);
3292391d89b8SG.Balaji 	if (active_proc && active_proc->aer_cb_fn) {
3293391d89b8SG.Balaji 		active_proc->aer_cb_fn(active_proc->aer_cb_arg, cpl);
3294391d89b8SG.Balaji 	}
3295391d89b8SG.Balaji }
3296391d89b8SG.Balaji 
32971010fb3aSDaniel Verkamp static void
3298a2a82087Sbalaji nvme_ctrlr_queue_async_event(struct spdk_nvme_ctrlr *ctrlr,
3299a2a82087Sbalaji 			     const struct spdk_nvme_cpl *cpl)
3300a2a82087Sbalaji {
33013791dfc6SDamiano Cipriani 	struct  spdk_nvme_ctrlr_aer_completion *nvme_event;
33024ac203b2SCurt Bruns 	struct spdk_nvme_ctrlr_process *proc;
3303a2a82087Sbalaji 
33044ac203b2SCurt Bruns 	/* Add the async event to each process object's event list */
33054ac203b2SCurt Bruns 	TAILQ_FOREACH(proc, &ctrlr->active_procs, tailq) {
33064ac203b2SCurt Bruns 		/* Must be shared memory so other processes can access it */
3307186b109dSJim Harris 		nvme_event = spdk_zmalloc(sizeof(*nvme_event), 0, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
3308a2a82087Sbalaji 		if (!nvme_event) {
3309a2a82087Sbalaji 			NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate nvme event; ignoring the event\n");
3310a2a82087Sbalaji 			return;
3311a2a82087Sbalaji 		}
3312a2a82087Sbalaji 		nvme_event->cpl = *cpl;
33134ac203b2SCurt Bruns 
33144ac203b2SCurt Bruns 		STAILQ_INSERT_TAIL(&proc->async_events, nvme_event, link);
33154ac203b2SCurt Bruns 	}
3316a2a82087Sbalaji }
3317a2a82087Sbalaji 
3318b4a375a1SJim Harris static void
3319a2a82087Sbalaji nvme_ctrlr_complete_queued_async_events(struct spdk_nvme_ctrlr *ctrlr)
3320a2a82087Sbalaji {
33213791dfc6SDamiano Cipriani 	struct  spdk_nvme_ctrlr_aer_completion  *nvme_event, *nvme_event_tmp;
33224ac203b2SCurt Bruns 	struct spdk_nvme_ctrlr_process	*active_proc;
3323a2a82087Sbalaji 
33244ac203b2SCurt Bruns 	active_proc = nvme_ctrlr_get_current_process(ctrlr);
33254ac203b2SCurt Bruns 
33264ac203b2SCurt Bruns 	STAILQ_FOREACH_SAFE(nvme_event, &active_proc->async_events, link, nvme_event_tmp) {
33274ac203b2SCurt Bruns 		STAILQ_REMOVE(&active_proc->async_events, nvme_event,
33283791dfc6SDamiano Cipriani 			      spdk_nvme_ctrlr_aer_completion, link);
3329a2a82087Sbalaji 		nvme_ctrlr_process_async_event(ctrlr, &nvme_event->cpl);
33304ac203b2SCurt Bruns 		spdk_free(nvme_event);
33314ac203b2SCurt Bruns 
3332a2a82087Sbalaji 	}
3333a2a82087Sbalaji }
3334a2a82087Sbalaji 
3335a2a82087Sbalaji static void
3336ad35d6cdSDaniel Verkamp nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl)
33371010fb3aSDaniel Verkamp {
33381010fb3aSDaniel Verkamp 	struct nvme_async_event_request	*aer = arg;
33396ce73aa6SDaniel Verkamp 	struct spdk_nvme_ctrlr		*ctrlr = aer->ctrlr;
33401010fb3aSDaniel Verkamp 
3341073f2dd8SJim Harris 	if (cpl->status.sct == SPDK_NVME_SCT_GENERIC &&
3342073f2dd8SJim Harris 	    cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) {
33431010fb3aSDaniel Verkamp 		/*
33441010fb3aSDaniel Verkamp 		 *  This is simulated when the controller is being shut down, to
33451010fb3aSDaniel Verkamp 		 *  effectively abort outstanding asynchronous event requests
33461010fb3aSDaniel Verkamp 		 *  and make sure all memory is freed.  Do not repost the
33471010fb3aSDaniel Verkamp 		 *  request in this case.
33481010fb3aSDaniel Verkamp 		 */
33491010fb3aSDaniel Verkamp 		return;
33501010fb3aSDaniel Verkamp 	}
33511010fb3aSDaniel Verkamp 
3352073f2dd8SJim Harris 	if (cpl->status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
3353073f2dd8SJim Harris 	    cpl->status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED) {
3354073f2dd8SJim Harris 		/*
3355073f2dd8SJim Harris 		 *  SPDK will only send as many AERs as the device says it supports,
3356073f2dd8SJim Harris 		 *  so this status code indicates an out-of-spec device.  Do not repost
3357073f2dd8SJim Harris 		 *  the request in this case.
3358073f2dd8SJim Harris 		 */
335901f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Controller appears out-of-spec for asynchronous event request handling.\n"
3360073f2dd8SJim Harris 				  "Do not repost this AER.\n");
3361073f2dd8SJim Harris 		return;
3362073f2dd8SJim Harris 	}
3363073f2dd8SJim Harris 
3364a2a82087Sbalaji 	/* Queue the event on each process's event list */
3365a2a82087Sbalaji 	nvme_ctrlr_queue_async_event(ctrlr, cpl);
33661010fb3aSDaniel Verkamp 
336786c4d33fSChangpeng Liu 	/* If the ctrlr was removed or is in the destruct state, do not send the AER again */
336886c4d33fSChangpeng Liu 	if (ctrlr->is_removed || ctrlr->is_destructed) {
33693608464fSZiye Yang 		return;
33703608464fSZiye Yang 	}
33713608464fSZiye Yang 
33721010fb3aSDaniel Verkamp 	/*
33731010fb3aSDaniel Verkamp 	 * Repost another asynchronous event request to replace the one
33741010fb3aSDaniel Verkamp 	 *  that just completed.
33751010fb3aSDaniel Verkamp 	 */
3376b96536e7SDaniel Verkamp 	if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) {
3377b96536e7SDaniel Verkamp 		/*
3378b96536e7SDaniel Verkamp 		 * We can't do anything to recover from a failure here,
3379b96536e7SDaniel Verkamp 		 * so just print a warning message and leave the AER unsubmitted.
3380b96536e7SDaniel Verkamp 		 */
338101f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "resubmitting AER failed!\n");
3382b96536e7SDaniel Verkamp 	}
33831010fb3aSDaniel Verkamp }
33841010fb3aSDaniel Verkamp 
3385b96536e7SDaniel Verkamp static int
33866ce73aa6SDaniel Verkamp nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
33871010fb3aSDaniel Verkamp 				    struct nvme_async_event_request *aer)
33881010fb3aSDaniel Verkamp {
33891010fb3aSDaniel Verkamp 	struct nvme_request *req;
33901010fb3aSDaniel Verkamp 
33911010fb3aSDaniel Verkamp 	aer->ctrlr = ctrlr;
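	/* AER commands carry no payload, so a null (no-buffer) request is used. */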
3392cd13f280SDaniel Verkamp 	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_async_event_cb, aer);
33931010fb3aSDaniel Verkamp 	aer->req = req;
3394b96536e7SDaniel Verkamp 	if (req == NULL) {
3395b96536e7SDaniel Verkamp 		return -1;
3396b96536e7SDaniel Verkamp 	}
33971010fb3aSDaniel Verkamp 
3398ad35d6cdSDaniel Verkamp 	req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
3399eb555b13SDaniel Verkamp 	return nvme_ctrlr_submit_admin_request(ctrlr, req);
34001010fb3aSDaniel Verkamp }
34011010fb3aSDaniel Verkamp 
3402cf5448a9SChangpeng Liu static void
3403cf5448a9SChangpeng Liu nvme_ctrlr_configure_aer_done(void *arg, const struct spdk_nvme_cpl *cpl)
3404cf5448a9SChangpeng Liu {
3405cf5448a9SChangpeng Liu 	struct nvme_async_event_request		*aer;
3406cf5448a9SChangpeng Liu 	int					rc;
3407cf5448a9SChangpeng Liu 	uint32_t				i;
3408cf5448a9SChangpeng Liu 	struct spdk_nvme_ctrlr *ctrlr =	(struct spdk_nvme_ctrlr *)arg;
3409cf5448a9SChangpeng Liu 
3410cf5448a9SChangpeng Liu 	if (spdk_nvme_cpl_is_error(cpl)) {
341101f45ecdSGangCao 		NVME_CTRLR_NOTICELOG(ctrlr, "nvme_ctrlr_configure_aer failed!\n");
3412f0f7005bSJim Harris 		ctrlr->num_aers = 0;
3413f0f7005bSJim Harris 	} else {
3414cf5448a9SChangpeng Liu 		/* aerl is a zero-based value, so we need to add 1 here. */
3415cf5448a9SChangpeng Liu 		ctrlr->num_aers = spdk_min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));
3416f0f7005bSJim Harris 	}
3417cf5448a9SChangpeng Liu 
3418cf5448a9SChangpeng Liu 	for (i = 0; i < ctrlr->num_aers; i++) {
3419cf5448a9SChangpeng Liu 		aer = &ctrlr->aer[i];
3420cf5448a9SChangpeng Liu 		rc = nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
3421cf5448a9SChangpeng Liu 		if (rc) {
342201f45ecdSGangCao 			NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_construct_and_submit_aer failed!\n");
3423cf5448a9SChangpeng Liu 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
3424cf5448a9SChangpeng Liu 			return;
3425cf5448a9SChangpeng Liu 		}
3426cf5448a9SChangpeng Liu 	}
34277e68d0baSJim Harris 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, ctrlr->opts.admin_timeout_ms);
3428cf5448a9SChangpeng Liu }
3429cf5448a9SChangpeng Liu 
34301010fb3aSDaniel Verkamp static int
3431cf5448a9SChangpeng Liu nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
34321010fb3aSDaniel Verkamp {
34332d192cf8SChangpeng Liu 	union spdk_nvme_feat_async_event_configuration	config;
343489bf6d5cSDaniel Verkamp 	int						rc;
34351010fb3aSDaniel Verkamp 
34362d192cf8SChangpeng Liu 	config.raw = 0;
3437b962b6beSJim Harris 
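	/*
	 * Discovery controllers only generate discovery log change notices.
	 * Other controllers get all critical warnings enabled, plus whichever
	 * optional notices they advertise, gated on the reported NVMe version.
	 */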
3438b962b6beSJim Harris 	if (spdk_nvme_ctrlr_is_discovery(ctrlr)) {
3439b962b6beSJim Harris 		config.bits.discovery_log_change_notice = 1;
3440b962b6beSJim Harris 	} else {
34412d192cf8SChangpeng Liu 		config.bits.crit_warn.bits.available_spare = 1;
34422d192cf8SChangpeng Liu 		config.bits.crit_warn.bits.temperature = 1;
34432d192cf8SChangpeng Liu 		config.bits.crit_warn.bits.device_reliability = 1;
34442d192cf8SChangpeng Liu 		config.bits.crit_warn.bits.read_only = 1;
34452d192cf8SChangpeng Liu 		config.bits.crit_warn.bits.volatile_memory_backup = 1;
3446f0f3a48fSChangpeng Liu 
3447f7b58aeaSDaniel Verkamp 		if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 2, 0)) {
3448b8edc465SDaniel Verkamp 			if (ctrlr->cdata.oaes.ns_attribute_notices) {
34492d192cf8SChangpeng Liu 				config.bits.ns_attr_notice = 1;
3450b8edc465SDaniel Verkamp 			}
3451b8edc465SDaniel Verkamp 			if (ctrlr->cdata.oaes.fw_activation_notices) {
34522d192cf8SChangpeng Liu 				config.bits.fw_activation_notice = 1;
3453f0f3a48fSChangpeng Liu 			}
3454ce7eee98SShuhei Matsumoto 			if (ctrlr->cdata.oaes.ana_change_notices) {
3455ce7eee98SShuhei Matsumoto 				config.bits.ana_change_notice = 1;
3456ce7eee98SShuhei Matsumoto 			}
3457b8edc465SDaniel Verkamp 		}
3458f7b58aeaSDaniel Verkamp 		if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 3, 0) && ctrlr->cdata.lpa.telemetry) {
34592d192cf8SChangpeng Liu 			config.bits.telemetry_log_notice = 1;
3460f0f3a48fSChangpeng Liu 		}
3461b962b6beSJim Harris 	}
3462f0f3a48fSChangpeng Liu 
34632706cd42SChangpeng Liu 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
34642706cd42SChangpeng Liu 			     ctrlr->opts.admin_timeout_ms);
3465cf5448a9SChangpeng Liu 
3466cf5448a9SChangpeng Liu 	rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, config,
3467cf5448a9SChangpeng Liu 			nvme_ctrlr_configure_aer_done,
3468cf5448a9SChangpeng Liu 			ctrlr);
346989bf6d5cSDaniel Verkamp 	if (rc != 0) {
3470cf5448a9SChangpeng Liu 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
347189bf6d5cSDaniel Verkamp 		return rc;
347289bf6d5cSDaniel Verkamp 	}
34731010fb3aSDaniel Verkamp 
34741010fb3aSDaniel Verkamp 	return 0;
34751010fb3aSDaniel Verkamp }
34761010fb3aSDaniel Verkamp 
3477cbd9c241SDaniel Verkamp struct spdk_nvme_ctrlr_process *
34781a9c19a9SSeth Howell nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr, pid_t pid)
3479cbd9c241SDaniel Verkamp {
3480cbd9c241SDaniel Verkamp 	struct spdk_nvme_ctrlr_process	*active_proc;
3481cbd9c241SDaniel Verkamp 
3482cbd9c241SDaniel Verkamp 	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
3483cbd9c241SDaniel Verkamp 		if (active_proc->pid == pid) {
3484cbd9c241SDaniel Verkamp 			return active_proc;
3485cbd9c241SDaniel Verkamp 		}
3486cbd9c241SDaniel Verkamp 	}
3487cbd9c241SDaniel Verkamp 
3488cbd9c241SDaniel Verkamp 	return NULL;
3489cbd9c241SDaniel Verkamp }
3490cbd9c241SDaniel Verkamp 
3491cbd9c241SDaniel Verkamp struct spdk_nvme_ctrlr_process *
34921a9c19a9SSeth Howell nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr)
3493cbd9c241SDaniel Verkamp {
34941a9c19a9SSeth Howell 	return nvme_ctrlr_get_process(ctrlr, getpid());
3495cbd9c241SDaniel Verkamp }
3496cbd9c241SDaniel Verkamp 
349720abbe8aSDaniel Verkamp /**
34989ec380baSGangCao  * This function will be called when a process is using the controller.
3499bfc8bc87SGangCao  *  1. For the primary process, it is called when constructing the controller.
3500bfc8bc87SGangCao  *  2. For a secondary process, it is called when probing the controller.
35019ec380baSGangCao  * Note: it checks whether the process has already been added before adding it again.
3502bfc8bc87SGangCao  */
3503bfc8bc87SGangCao int
3504bfc8bc87SGangCao nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
3505bfc8bc87SGangCao {
3506cbd9c241SDaniel Verkamp 	struct spdk_nvme_ctrlr_process	*ctrlr_proc;
35079ec380baSGangCao 	pid_t				pid = getpid();
35089ec380baSGangCao 
35099ec380baSGangCao 	/* Check whether the process is already added or not */
35101a9c19a9SSeth Howell 	if (nvme_ctrlr_get_process(ctrlr, pid)) {
35119ec380baSGangCao 		return 0;
35129ec380baSGangCao 	}
3513bfc8bc87SGangCao 
3514bfc8bc87SGangCao 	/* Initialize the per-process properties for this ctrlr */
3515eb6a2cb8Szkhatami88 	ctrlr_proc = spdk_zmalloc(sizeof(struct spdk_nvme_ctrlr_process),
3516186b109dSJim Harris 				  64, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
3517bfc8bc87SGangCao 	if (ctrlr_proc == NULL) {
351801f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "failed to allocate memory to track the process props\n");
3519bfc8bc87SGangCao 
3520bfc8bc87SGangCao 		return -1;
3521bfc8bc87SGangCao 	}
3522bfc8bc87SGangCao 
3523bfc8bc87SGangCao 	ctrlr_proc->is_primary = spdk_process_is_primary();
35249ec380baSGangCao 	ctrlr_proc->pid = pid;
3525bfc8bc87SGangCao 	STAILQ_INIT(&ctrlr_proc->active_reqs);
3526bfc8bc87SGangCao 	ctrlr_proc->devhandle = devhandle;
3527ec5b6fedSGangCao 	ctrlr_proc->ref = 0;
3528bb726d51SGangCao 	TAILQ_INIT(&ctrlr_proc->allocated_io_qpairs);
35294ac203b2SCurt Bruns 	STAILQ_INIT(&ctrlr_proc->async_events);
3530bfc8bc87SGangCao 
3531bfc8bc87SGangCao 	TAILQ_INSERT_TAIL(&ctrlr->active_procs, ctrlr_proc, tailq);
3532bfc8bc87SGangCao 
3533bfc8bc87SGangCao 	return 0;
3534bfc8bc87SGangCao }
3535bfc8bc87SGangCao 
3536bfc8bc87SGangCao /**
3537bb726d51SGangCao  * This function will be called when the process detaches the controller.
3538bb726d51SGangCao  * Note: the ctrlr_lock must be held when calling this function.
3539bb726d51SGangCao  */
3540bb726d51SGangCao static void
3541bb726d51SGangCao nvme_ctrlr_remove_process(struct spdk_nvme_ctrlr *ctrlr,
3542bb726d51SGangCao 			  struct spdk_nvme_ctrlr_process *proc)
3543bb726d51SGangCao {
3544bb726d51SGangCao 	struct spdk_nvme_qpair	*qpair, *tmp_qpair;
3545bb726d51SGangCao 
3546bb726d51SGangCao 	assert(STAILQ_EMPTY(&proc->active_reqs));
3547bb726d51SGangCao 
3548bb726d51SGangCao 	TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
3549bb726d51SGangCao 		spdk_nvme_ctrlr_free_io_qpair(qpair);
3550bb726d51SGangCao 	}
3551bb726d51SGangCao 
3552bb726d51SGangCao 	TAILQ_REMOVE(&ctrlr->active_procs, proc, tailq);
3553bb726d51SGangCao 
35541d3e0340SDarek Stojaczyk 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
35555a588715SDarek Stojaczyk 		spdk_pci_device_detach(proc->devhandle);
35565a588715SDarek Stojaczyk 	}
35575a588715SDarek Stojaczyk 
3558f9a6588fSDarek Stojaczyk 	spdk_free(proc);
3559bb726d51SGangCao }
3560bb726d51SGangCao 
3561bb726d51SGangCao /**
3562ec5b6fedSGangCao  * This function will be called when a process has exited unexpectedly,
356352f3c2faSGangCao  *  in order to free any incomplete nvme requests, allocated IO qpairs,
356452f3c2faSGangCao  *  and allocated memory.
3565bb726d51SGangCao  * Note: the ctrlr_lock must be held when calling this function.
3566ec5b6fedSGangCao  */
3567ec5b6fedSGangCao static void
3568af9eca84SGangCao nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc)
3569ec5b6fedSGangCao {
3570ec5b6fedSGangCao 	struct nvme_request	*req, *tmp_req;
357152f3c2faSGangCao 	struct spdk_nvme_qpair	*qpair, *tmp_qpair;
35723791dfc6SDamiano Cipriani 	struct spdk_nvme_ctrlr_aer_completion *event;
3573ec5b6fedSGangCao 
3574ec5b6fedSGangCao 	STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) {
3575ec5b6fedSGangCao 		STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq);
3576ec5b6fedSGangCao 
3577ec5b6fedSGangCao 		assert(req->pid == proc->pid);
35784b1aa5daSDeepak Abraham Tom 		nvme_cleanup_user_req(req);
3579ec5b6fedSGangCao 		nvme_free_request(req);
3580ec5b6fedSGangCao 	}
3581ec5b6fedSGangCao 
35824ac203b2SCurt Bruns 	/* Free any async events still queued for this process */
35834ac203b2SCurt Bruns 	while (!STAILQ_EMPTY(&proc->async_events)) {
35844ac203b2SCurt Bruns 		event = STAILQ_FIRST(&proc->async_events);
35854ac203b2SCurt Bruns 		STAILQ_REMOVE_HEAD(&proc->async_events, link);
35864ac203b2SCurt Bruns 		spdk_free(event);
35874ac203b2SCurt Bruns 	}
35884ac203b2SCurt Bruns 
358952f3c2faSGangCao 	TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
359052f3c2faSGangCao 		TAILQ_REMOVE(&proc->allocated_io_qpairs, qpair, per_process_tailq);
359152f3c2faSGangCao 
359237ccb50cSJim Harris 		/*
359337ccb50cSJim Harris 		 * The process may have been killed while some qpairs were in their
359437ccb50cSJim Harris 		 *  completion context.  Clear that flag here to allow these IO
359537ccb50cSJim Harris 		 *  qpairs to be deleted.
359637ccb50cSJim Harris 		 */
359737ccb50cSJim Harris 		qpair->in_completion_context = 0;
3598619ab1a7SGangCao 
3599619ab1a7SGangCao 		qpair->no_deletion_notification_needed = 1;
3600619ab1a7SGangCao 
360152f3c2faSGangCao 		spdk_nvme_ctrlr_free_io_qpair(qpair);
360252f3c2faSGangCao 	}
360352f3c2faSGangCao 
3604f9a6588fSDarek Stojaczyk 	spdk_free(proc);
3605ec5b6fedSGangCao }
3606ec5b6fedSGangCao 
3607ec5b6fedSGangCao /**
3608bfc8bc87SGangCao  * This function will be called when destructing the controller.
3609bfc8bc87SGangCao  *  1. There are no more admin requests on this controller.
3610bfc8bc87SGangCao  *  2. Clean up any leftover resource allocations whose associated process is gone.
3611bfc8bc87SGangCao  */
3612bfc8bc87SGangCao void
3613bfc8bc87SGangCao nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
3614bfc8bc87SGangCao {
3615af9eca84SGangCao 	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;
3616bfc8bc87SGangCao 
3617bfc8bc87SGangCao 	/* Free all the processes' properties and make sure there are no pending admin IOs */
3618bfc8bc87SGangCao 	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
3619bfc8bc87SGangCao 		TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
3620bfc8bc87SGangCao 
3621bfc8bc87SGangCao 		assert(STAILQ_EMPTY(&active_proc->active_reqs));
3622bfc8bc87SGangCao 
3623eb6a2cb8Szkhatami88 		spdk_free(active_proc);
3624bfc8bc87SGangCao 	}
3625bfc8bc87SGangCao }
3626bfc8bc87SGangCao 
3627bfc8bc87SGangCao /**
3628ec5b6fedSGangCao  * This function will be called when any other process attaches or
3629ec5b6fedSGangCao  *  detaches the controller in order to clean up those unexpectedly
3630ec5b6fedSGangCao  *  terminated processes.
3631bb726d51SGangCao  * Note: the ctrlr_lock must be held when calling this function.
3632ec5b6fedSGangCao  */
3633bb726d51SGangCao static int
3634ec5b6fedSGangCao nvme_ctrlr_remove_inactive_proc(struct spdk_nvme_ctrlr *ctrlr)
3635ec5b6fedSGangCao {
3636af9eca84SGangCao 	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;
3637bb726d51SGangCao 	int				active_proc_count = 0;
3638ec5b6fedSGangCao 
3639ec5b6fedSGangCao 	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
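		/*
		 * kill() with signal 0 performs error checking only; ESRCH means
		 * the target process no longer exists.
		 */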
3640ec5b6fedSGangCao 		if ((kill(active_proc->pid, 0) == -1) && (errno == ESRCH)) {
364101f45ecdSGangCao 			NVME_CTRLR_ERRLOG(ctrlr, "process %d terminated unexpectedly\n", active_proc->pid);
3642ec5b6fedSGangCao 
3643ec5b6fedSGangCao 			TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
3644ec5b6fedSGangCao 
3645ec5b6fedSGangCao 			nvme_ctrlr_cleanup_process(active_proc);
3646bb726d51SGangCao 		} else {
3647bb726d51SGangCao 			active_proc_count++;
3648ec5b6fedSGangCao 		}
3649ec5b6fedSGangCao 	}
3650bb726d51SGangCao 
3651bb726d51SGangCao 	return active_proc_count;
3652ec5b6fedSGangCao }
3653ec5b6fedSGangCao 
3654ec5b6fedSGangCao void
3655ec5b6fedSGangCao nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
3656ec5b6fedSGangCao {
3657af9eca84SGangCao 	struct spdk_nvme_ctrlr_process	*active_proc;
3658ec5b6fedSGangCao 
3659e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
3660ec5b6fedSGangCao 
3661ec5b6fedSGangCao 	nvme_ctrlr_remove_inactive_proc(ctrlr);
3662ec5b6fedSGangCao 
36631a9c19a9SSeth Howell 	active_proc = nvme_ctrlr_get_current_process(ctrlr);
3664cbd9c241SDaniel Verkamp 	if (active_proc) {
3665ec5b6fedSGangCao 		active_proc->ref++;
3666ec5b6fedSGangCao 	}
3667ec5b6fedSGangCao 
3668e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
3669ec5b6fedSGangCao }
3670ec5b6fedSGangCao 
3671ec5b6fedSGangCao void
3672ec5b6fedSGangCao nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
3673ec5b6fedSGangCao {
3674cbd9c241SDaniel Verkamp 	struct spdk_nvme_ctrlr_process	*active_proc;
3675bb726d51SGangCao 	int				proc_count;
3676ec5b6fedSGangCao 
3677e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
3678ec5b6fedSGangCao 
3679bb726d51SGangCao 	proc_count = nvme_ctrlr_remove_inactive_proc(ctrlr);
3680ec5b6fedSGangCao 
36811a9c19a9SSeth Howell 	active_proc = nvme_ctrlr_get_current_process(ctrlr);
3682cbd9c241SDaniel Verkamp 	if (active_proc) {
3683ec5b6fedSGangCao 		active_proc->ref--;
3684ec5b6fedSGangCao 		assert(active_proc->ref >= 0);
3685bb726d51SGangCao 
3686bb726d51SGangCao 		/*
3687bb726d51SGangCao 		 * The last active process will be removed at the end of
3688bb726d51SGangCao 		 * the destruction of the controller.
3689bb726d51SGangCao 		 */
3690bb726d51SGangCao 		if (active_proc->ref == 0 && proc_count != 1) {
3691bb726d51SGangCao 			nvme_ctrlr_remove_process(ctrlr, active_proc);
3692bb726d51SGangCao 		}
3693ec5b6fedSGangCao 	}
3694ec5b6fedSGangCao 
3695e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
3696ec5b6fedSGangCao }
3697ec5b6fedSGangCao 
3698ec5b6fedSGangCao int
3699ec5b6fedSGangCao nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
3700ec5b6fedSGangCao {
3701af9eca84SGangCao 	struct spdk_nvme_ctrlr_process	*active_proc;
3702ec5b6fedSGangCao 	int				ref = 0;
3703ec5b6fedSGangCao 
3704e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
3705ec5b6fedSGangCao 
3706ec5b6fedSGangCao 	nvme_ctrlr_remove_inactive_proc(ctrlr);
3707ec5b6fedSGangCao 
3708ec5b6fedSGangCao 	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
3709ec5b6fedSGangCao 		ref += active_proc->ref;
3710ec5b6fedSGangCao 	}
3711ec5b6fedSGangCao 
3712e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
3713ec5b6fedSGangCao 
3714ec5b6fedSGangCao 	return ref;
3715ec5b6fedSGangCao }
3716ec5b6fedSGangCao 
3717ec5b6fedSGangCao /**
3718b347d551SGangCao  *  Get the PCI device handle which is only visible to its associated process.
3719b347d551SGangCao  */
3720b347d551SGangCao struct spdk_pci_device *
3721b347d551SGangCao nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr)
3722b347d551SGangCao {
3723b347d551SGangCao 	struct spdk_nvme_ctrlr_process	*active_proc;
3724b347d551SGangCao 	struct spdk_pci_device		*devhandle = NULL;
3725b347d551SGangCao 
3726e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
3727b347d551SGangCao 
37281a9c19a9SSeth Howell 	active_proc = nvme_ctrlr_get_current_process(ctrlr);
3729cbd9c241SDaniel Verkamp 	if (active_proc) {
3730b347d551SGangCao 		devhandle = active_proc->devhandle;
3731b347d551SGangCao 	}
3732b347d551SGangCao 
3733e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
3734b347d551SGangCao 
3735b347d551SGangCao 	return devhandle;
3736b347d551SGangCao }
3737b347d551SGangCao 
373809acc162SKonrad Sztyber static void
373909acc162SKonrad Sztyber nvme_ctrlr_process_init_vs_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
374009acc162SKonrad Sztyber {
374109acc162SKonrad Sztyber 	struct spdk_nvme_ctrlr *ctrlr = ctx;
374209acc162SKonrad Sztyber 
374309acc162SKonrad Sztyber 	if (spdk_nvme_cpl_is_error(cpl)) {
374409acc162SKonrad Sztyber 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the VS register\n");
374509acc162SKonrad Sztyber 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
374609acc162SKonrad Sztyber 		return;
374709acc162SKonrad Sztyber 	}
374809acc162SKonrad Sztyber 
374909acc162SKonrad Sztyber 	assert(value <= UINT32_MAX);
375009acc162SKonrad Sztyber 	ctrlr->vs.raw = (uint32_t)value;
375109acc162SKonrad Sztyber 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_CAP, NVME_TIMEOUT_INFINITE);
375209acc162SKonrad Sztyber }
375309acc162SKonrad Sztyber 
37549d8251f6SKonrad Sztyber static void
37559d8251f6SKonrad Sztyber nvme_ctrlr_process_init_cap_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
37569d8251f6SKonrad Sztyber {
37579d8251f6SKonrad Sztyber 	struct spdk_nvme_ctrlr *ctrlr = ctx;
37589d8251f6SKonrad Sztyber 
37599d8251f6SKonrad Sztyber 	if (spdk_nvme_cpl_is_error(cpl)) {
37609d8251f6SKonrad Sztyber 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CAP register\n");
37619d8251f6SKonrad Sztyber 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
37629d8251f6SKonrad Sztyber 		return;
37639d8251f6SKonrad Sztyber 	}
37649d8251f6SKonrad Sztyber 
37659d8251f6SKonrad Sztyber 	ctrlr->cap.raw = value;
37669d8251f6SKonrad Sztyber 	nvme_ctrlr_init_cap(ctrlr);
37679d8251f6SKonrad Sztyber 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CHECK_EN, NVME_TIMEOUT_INFINITE);
37689d8251f6SKonrad Sztyber }
37699d8251f6SKonrad Sztyber 
3770ccc084f3SKonrad Sztyber static void
3771ccc084f3SKonrad Sztyber nvme_ctrlr_process_init_check_en(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
3772ccc084f3SKonrad Sztyber {
3773ccc084f3SKonrad Sztyber 	struct spdk_nvme_ctrlr *ctrlr = ctx;
3774ccc084f3SKonrad Sztyber 	enum nvme_ctrlr_state state;
3775ccc084f3SKonrad Sztyber 
3776ccc084f3SKonrad Sztyber 	if (spdk_nvme_cpl_is_error(cpl)) {
3777ccc084f3SKonrad Sztyber 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
3778ccc084f3SKonrad Sztyber 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
3779ccc084f3SKonrad Sztyber 		return;
3780ccc084f3SKonrad Sztyber 	}
3781ccc084f3SKonrad Sztyber 
3782ccc084f3SKonrad Sztyber 	assert(value <= UINT32_MAX);
3783ccc084f3SKonrad Sztyber 	ctrlr->process_init_cc.raw = (uint32_t)value;
3784ccc084f3SKonrad Sztyber 
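	/*
	 * If the controller is already enabled, wait for CSTS.RDY = 1 before
	 * disabling it; otherwise wait for CSTS.RDY = 0 directly.
	 */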
3785ccc084f3SKonrad Sztyber 	if (ctrlr->process_init_cc.bits.en) {
3786ccc084f3SKonrad Sztyber 		NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 1\n");
3787ccc084f3SKonrad Sztyber 		state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1;
3788ccc084f3SKonrad Sztyber 	} else {
3789ccc084f3SKonrad Sztyber 		state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0;
3790ccc084f3SKonrad Sztyber 	}
3791ccc084f3SKonrad Sztyber 
3792ccc084f3SKonrad Sztyber 	nvme_ctrlr_set_state(ctrlr, state, nvme_ctrlr_get_ready_timeout(ctrlr));
3793ccc084f3SKonrad Sztyber }
3794ccc084f3SKonrad Sztyber 
37958da3c166SKonrad Sztyber static void
37965f376485SKonrad Sztyber nvme_ctrlr_process_init_set_en_0(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
37975f376485SKonrad Sztyber {
37985f376485SKonrad Sztyber 	struct spdk_nvme_ctrlr *ctrlr = ctx;
37995f376485SKonrad Sztyber 
38005f376485SKonrad Sztyber 	if (spdk_nvme_cpl_is_error(cpl)) {
38015f376485SKonrad Sztyber 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to write the CC register\n");
38025f376485SKonrad Sztyber 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
38035f376485SKonrad Sztyber 		return;
38045f376485SKonrad Sztyber 	}
38055f376485SKonrad Sztyber 
38065f376485SKonrad Sztyber 	/*
38075f376485SKonrad Sztyber 	 * Wait 2.5 seconds before accessing PCI registers.
38085f376485SKonrad Sztyber 	 * Not using sleep() to avoid blocking other controllers' initialization.
38095f376485SKonrad Sztyber 	 */
38105f376485SKonrad Sztyber 	if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) {
38115f376485SKonrad Sztyber 		NVME_CTRLR_DEBUGLOG(ctrlr, "Applying quirk: delay 2.5 seconds before reading registers\n");
38125f376485SKonrad Sztyber 		ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2500 * spdk_get_ticks_hz() / 1000);
38135f376485SKonrad Sztyber 	}
38145f376485SKonrad Sztyber 
38155f376485SKonrad Sztyber 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
38165f376485SKonrad Sztyber 			     nvme_ctrlr_get_ready_timeout(ctrlr));
38175f376485SKonrad Sztyber }
38185f376485SKonrad Sztyber 
38195f376485SKonrad Sztyber static void
38205f376485SKonrad Sztyber nvme_ctrlr_process_init_set_en_0_read_cc(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
38215f376485SKonrad Sztyber {
38225f376485SKonrad Sztyber 	struct spdk_nvme_ctrlr *ctrlr = ctx;
38235f376485SKonrad Sztyber 	union spdk_nvme_cc_register cc;
38245f376485SKonrad Sztyber 	int rc;
38255f376485SKonrad Sztyber 
38265f376485SKonrad Sztyber 	if (spdk_nvme_cpl_is_error(cpl)) {
38275f376485SKonrad Sztyber 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
38285f376485SKonrad Sztyber 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
38295f376485SKonrad Sztyber 		return;
38305f376485SKonrad Sztyber 	}
38315f376485SKonrad Sztyber 
38325f376485SKonrad Sztyber 	assert(value <= UINT32_MAX);
38335f376485SKonrad Sztyber 	cc.raw = (uint32_t)value;
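	/* Clear only the enable bit, preserving the other CC fields just read. */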
38345f376485SKonrad Sztyber 	cc.bits.en = 0;
38355f376485SKonrad Sztyber 	ctrlr->process_init_cc.raw = cc.raw;
38365f376485SKonrad Sztyber 
38375f376485SKonrad Sztyber 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC,
38385f376485SKonrad Sztyber 			     nvme_ctrlr_get_ready_timeout(ctrlr));
38395f376485SKonrad Sztyber 
38405f376485SKonrad Sztyber 	rc = nvme_ctrlr_set_cc_async(ctrlr, cc.raw, nvme_ctrlr_process_init_set_en_0, ctrlr);
38415f376485SKonrad Sztyber 	if (rc != 0) {
38425f376485SKonrad Sztyber 		NVME_CTRLR_ERRLOG(ctrlr, "set_cc() failed\n");
38435f376485SKonrad Sztyber 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
38445f376485SKonrad Sztyber 	}
38455f376485SKonrad Sztyber }
38465f376485SKonrad Sztyber 
38475f376485SKonrad Sztyber static void
38488da3c166SKonrad Sztyber nvme_ctrlr_process_init_wait_for_ready_1(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
38498da3c166SKonrad Sztyber {
38508da3c166SKonrad Sztyber 	struct spdk_nvme_ctrlr *ctrlr = ctx;
38518da3c166SKonrad Sztyber 	union spdk_nvme_csts_register csts;
38528da3c166SKonrad Sztyber 
38538da3c166SKonrad Sztyber 	if (spdk_nvme_cpl_is_error(cpl)) {
38548da3c166SKonrad Sztyber 		/* While a device is resetting, it may be unable to service MMIO reads
38558da3c166SKonrad Sztyber 		 * temporarily. Allow for this case.
38568da3c166SKonrad Sztyber 		 */
38578da3c166SKonrad Sztyber 		if (!ctrlr->is_failed && ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
3858e4e94c38SKonrad Sztyber 			NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to read the CSTS register\n");
38598da3c166SKonrad Sztyber 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
38608da3c166SKonrad Sztyber 					     NVME_TIMEOUT_KEEP_EXISTING);
38618da3c166SKonrad Sztyber 		} else {
3862e4e94c38SKonrad Sztyber 			NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
38638da3c166SKonrad Sztyber 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
38648da3c166SKonrad Sztyber 		}
38658da3c166SKonrad Sztyber 
38668da3c166SKonrad Sztyber 		return;
38678da3c166SKonrad Sztyber 	}
38688da3c166SKonrad Sztyber 
38698da3c166SKonrad Sztyber 	assert(value <= UINT32_MAX);
38708da3c166SKonrad Sztyber 	csts.raw = (uint32_t)value;
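	/*
	 * Move on to disabling the controller once it reports ready, or
	 * immediately if it reports controller fatal status (CFS).
	 */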
3871a8185643SKonrad Sztyber 	if (csts.bits.rdy == 1 || csts.bits.cfs == 1) {
38728da3c166SKonrad Sztyber 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_EN_0,
38738da3c166SKonrad Sztyber 				     nvme_ctrlr_get_ready_timeout(ctrlr));
38748da3c166SKonrad Sztyber 	} else {
38758da3c166SKonrad Sztyber 		NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 1 && CSTS.RDY = 0 - waiting for reset to complete\n");
38762f9c97a4SKonrad Sztyber 		nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
38778da3c166SKonrad Sztyber 					   NVME_TIMEOUT_KEEP_EXISTING);
38788da3c166SKonrad Sztyber 	}
38798da3c166SKonrad Sztyber }
38808da3c166SKonrad Sztyber 
38818da3c166SKonrad Sztyber static void
38828da3c166SKonrad Sztyber nvme_ctrlr_process_init_wait_for_ready_0(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
38838da3c166SKonrad Sztyber {
38848da3c166SKonrad Sztyber 	struct spdk_nvme_ctrlr *ctrlr = ctx;
38858da3c166SKonrad Sztyber 	union spdk_nvme_csts_register csts;
38868da3c166SKonrad Sztyber 
38878da3c166SKonrad Sztyber 	if (spdk_nvme_cpl_is_error(cpl)) {
38888da3c166SKonrad Sztyber 		/* While a device is resetting, it may be unable to service MMIO reads
38898da3c166SKonrad Sztyber 		 * temporarily. Allow for this case.
38908da3c166SKonrad Sztyber 		 */
38918da3c166SKonrad Sztyber 		if (!ctrlr->is_failed && ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
3892e4e94c38SKonrad Sztyber 			NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to read the CSTS register\n");
38938da3c166SKonrad Sztyber 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
38948da3c166SKonrad Sztyber 					     NVME_TIMEOUT_KEEP_EXISTING);
38958da3c166SKonrad Sztyber 		} else {
3896e4e94c38SKonrad Sztyber 			NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
38978da3c166SKonrad Sztyber 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
38988da3c166SKonrad Sztyber 		}
38998da3c166SKonrad Sztyber 
39008da3c166SKonrad Sztyber 		return;
39018da3c166SKonrad Sztyber 	}
39028da3c166SKonrad Sztyber 
39038da3c166SKonrad Sztyber 	assert(value <= UINT32_MAX);
39048da3c166SKonrad Sztyber 	csts.raw = (uint32_t)value;
39058da3c166SKonrad Sztyber 	if (csts.bits.rdy == 0) {
39068da3c166SKonrad Sztyber 		NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 0 && CSTS.RDY = 0\n");
39075e5423deSShuhei Matsumoto 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLED,
39088da3c166SKonrad Sztyber 				     nvme_ctrlr_get_ready_timeout(ctrlr));
39098da3c166SKonrad Sztyber 	} else {
39102f9c97a4SKonrad Sztyber 		nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
39118da3c166SKonrad Sztyber 					   NVME_TIMEOUT_KEEP_EXISTING);
39128da3c166SKonrad Sztyber 	}
39138da3c166SKonrad Sztyber }
39148da3c166SKonrad Sztyber 
39159e216680SKonrad Sztyber static void
39169e216680SKonrad Sztyber nvme_ctrlr_process_init_enable_wait_for_ready_1(void *ctx, uint64_t value,
39179e216680SKonrad Sztyber 		const struct spdk_nvme_cpl *cpl)
39189e216680SKonrad Sztyber {
39199e216680SKonrad Sztyber 	struct spdk_nvme_ctrlr *ctrlr = ctx;
39209e216680SKonrad Sztyber 	union spdk_nvme_csts_register csts;
39219e216680SKonrad Sztyber 
39229e216680SKonrad Sztyber 	if (spdk_nvme_cpl_is_error(cpl)) {
39239e216680SKonrad Sztyber 		/* While a device is resetting, it may be unable to service MMIO reads
39249e216680SKonrad Sztyber 		 * temporarily. Allow for this case.
39259e216680SKonrad Sztyber 		 */
39269e216680SKonrad Sztyber 		if (!ctrlr->is_failed && ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
39279e216680SKonrad Sztyber 			NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to read the CSTS register\n");
39289e216680SKonrad Sztyber 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
39299e216680SKonrad Sztyber 					     NVME_TIMEOUT_KEEP_EXISTING);
39309e216680SKonrad Sztyber 		} else {
39319e216680SKonrad Sztyber 			NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
39329e216680SKonrad Sztyber 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
39339e216680SKonrad Sztyber 		}
39349e216680SKonrad Sztyber 
39359e216680SKonrad Sztyber 		return;
39369e216680SKonrad Sztyber 	}
39379e216680SKonrad Sztyber 
39389e216680SKonrad Sztyber 	assert(value <= UINT32_MAX);
39399e216680SKonrad Sztyber 	csts.raw = value;
39409e216680SKonrad Sztyber 	if (csts.bits.rdy == 1) {
39419e216680SKonrad Sztyber 		NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 1 && CSTS.RDY = 1 - controller is ready\n");
39429e216680SKonrad Sztyber 		/*
39439e216680SKonrad Sztyber 		 * The controller has been enabled.
39449e216680SKonrad Sztyber 		 *  Perform the rest of initialization serially.
39459e216680SKonrad Sztyber 		 */
39469e216680SKonrad Sztyber 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_RESET_ADMIN_QUEUE,
39479e216680SKonrad Sztyber 				     ctrlr->opts.admin_timeout_ms);
39489e216680SKonrad Sztyber 	} else {
39492f9c97a4SKonrad Sztyber 		nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
39509e216680SKonrad Sztyber 					   NVME_TIMEOUT_KEEP_EXISTING);
39519e216680SKonrad Sztyber 	}
39529e216680SKonrad Sztyber }
39539e216680SKonrad Sztyber 
3954b347d551SGangCao /**
395520abbe8aSDaniel Verkamp  * This function will be called repeatedly during initialization until the controller is ready.
395620abbe8aSDaniel Verkamp  */
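/*
 * A sketch of the typical (non-error) progression through the states handled
 * below: CONNECT_ADMINQ -> READ_VS -> READ_CAP -> CHECK_EN -> disable
 * (SET_EN_0, DISABLE_WAIT_FOR_READY_0) -> DISABLED -> ENABLE ->
 * ENABLE_WAIT_FOR_READY_1 -> RESET_ADMIN_QUEUE -> IDENTIFY -> ... -> READY.
 * Register accesses are asynchronous; the WAIT_FOR_* states poll for their
 * completions.
 */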
39571010fb3aSDaniel Verkamp int
395820abbe8aSDaniel Verkamp nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
39591010fb3aSDaniel Verkamp {
396020abbe8aSDaniel Verkamp 	uint32_t ready_timeout_in_ms;
39613c2190c2SJim Harris 	uint64_t ticks;
3962eb5cb3dbSChangpeng Liu 	int rc = 0;
396320abbe8aSDaniel Verkamp 
39643c2190c2SJim Harris 	ticks = spdk_get_ticks();
39653c2190c2SJim Harris 
3966c257e5b4SWenbo Wang 	/*
3967c257e5b4SWenbo Wang 	 * We may need to avoid accessing any register on the target controller
3968c257e5b4SWenbo Wang 	 * for a while. Return early without touching the FSM.
3969c257e5b4SWenbo Wang 	 * The sleep_timeout_tsc > 0 check exists for the benefit of unit tests.
3970c257e5b4SWenbo Wang 	 */
3971c257e5b4SWenbo Wang 	if ((ctrlr->sleep_timeout_tsc > 0) &&
39723c2190c2SJim Harris 	    (ticks <= ctrlr->sleep_timeout_tsc)) {
3973c257e5b4SWenbo Wang 		return 0;
3974c257e5b4SWenbo Wang 	}
3975c257e5b4SWenbo Wang 	ctrlr->sleep_timeout_tsc = 0;
3976c257e5b4SWenbo Wang 
397738091aadSKonrad Sztyber 	ready_timeout_in_ms = nvme_ctrlr_get_ready_timeout(ctrlr);
397820abbe8aSDaniel Verkamp 
397920abbe8aSDaniel Verkamp 	/*
398020abbe8aSDaniel Verkamp 	 * Check if the current initialization step is done or has timed out.
398120abbe8aSDaniel Verkamp 	 */
398220abbe8aSDaniel Verkamp 	switch (ctrlr->state) {
3983951bb3a4SDarek Stojaczyk 	case NVME_CTRLR_STATE_INIT_DELAY:
3984951bb3a4SDarek Stojaczyk 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, ready_timeout_in_ms);
398532e22643SJim Harris 		if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_INIT) {
3986951bb3a4SDarek Stojaczyk 			/*
3987951bb3a4SDarek Stojaczyk 			 * Controller may need some delay before it's enabled.
3988951bb3a4SDarek Stojaczyk 			 *
3989951bb3a4SDarek Stojaczyk 			 * This is a workaround for an issue where the PCIe-attached NVMe controller
3990951bb3a4SDarek Stojaczyk 			 * is not ready after VFIO reset. We delay the initialization rather than the
3991951bb3a4SDarek Stojaczyk 			 * enabling itself, because this is required only for the very first enabling
3992951bb3a4SDarek Stojaczyk 			 * - directly after a VFIO reset.
3993951bb3a4SDarek Stojaczyk 			 */
399401f45ecdSGangCao 			NVME_CTRLR_DEBUGLOG(ctrlr, "Adding 2 second delay before initializing the controller\n");
39953c2190c2SJim Harris 			ctrlr->sleep_timeout_tsc = ticks + (2000 * spdk_get_ticks_hz() / 1000);
399632e22643SJim Harris 		}
3997951bb3a4SDarek Stojaczyk 		break;
3998951bb3a4SDarek Stojaczyk 
3999af130056SAlexey Marchuk 	case NVME_CTRLR_STATE_DISCONNECTED:
4000af130056SAlexey Marchuk 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
4001af130056SAlexey Marchuk 		break;
4002af130056SAlexey Marchuk 
4003af130056SAlexey Marchuk 	case NVME_CTRLR_STATE_CONNECT_ADMINQ: /* synonymous with NVME_CTRLR_STATE_INIT and NVME_CTRLR_STATE_DISCONNECTED */
4004d6f6ffd2SJim Harris 		rc = nvme_transport_ctrlr_connect_qpair(ctrlr, ctrlr->adminq);
4005d6f6ffd2SJim Harris 		if (rc == 0) {
400637c36ec1SKonrad Sztyber 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ,
400737c36ec1SKonrad Sztyber 					     NVME_TIMEOUT_INFINITE);
4008d6f6ffd2SJim Harris 		} else {
4009d6f6ffd2SJim Harris 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
4010d6f6ffd2SJim Harris 		}
4011d6f6ffd2SJim Harris 		break;
4012d6f6ffd2SJim Harris 
401337c36ec1SKonrad Sztyber 	case NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ:
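		/* Poll the admin queue to drive the asynchronous connect forward. */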
401437c36ec1SKonrad Sztyber 		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
401537c36ec1SKonrad Sztyber 
401637c36ec1SKonrad Sztyber 		switch (nvme_qpair_get_state(ctrlr->adminq)) {
401737c36ec1SKonrad Sztyber 		case NVME_QPAIR_CONNECTING:
4018e0ab59f1SAlex Michon 			if (ctrlr->is_failed) {
4019e0ab59f1SAlex Michon 				nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
4020e0ab59f1SAlex Michon 				break;
4021e0ab59f1SAlex Michon 			}
4022e0ab59f1SAlex Michon 
402337c36ec1SKonrad Sztyber 			break;
402437c36ec1SKonrad Sztyber 		case NVME_QPAIR_CONNECTED:
402537c36ec1SKonrad Sztyber 			nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_ENABLED);
402637c36ec1SKonrad Sztyber 		/* Fall through */
402737c36ec1SKonrad Sztyber 		case NVME_QPAIR_ENABLED:
402837c36ec1SKonrad Sztyber 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_VS,
402937c36ec1SKonrad Sztyber 					     NVME_TIMEOUT_INFINITE);
4030a4b7f87bSKonrad Sztyber 			/* Abort any queued requests that were sent while the adminq was connecting
4031a4b7f87bSKonrad Sztyber 			 * to avoid stalling the init process during a reset, as requests don't get
4032a4b7f87bSKonrad Sztyber 			 * resubmitted while the controller is resetting and subsequent commands
4033a4b7f87bSKonrad Sztyber 			 * would get queued too.
4034a4b7f87bSKonrad Sztyber 			 */
403549d3a5e4SShuhei Matsumoto 			nvme_qpair_abort_queued_reqs(ctrlr->adminq);
403637c36ec1SKonrad Sztyber 			break;
40378926303bSShuhei Matsumoto 		case NVME_QPAIR_DISCONNECTING:
40388926303bSShuhei Matsumoto 			assert(ctrlr->adminq->async == true);
40398926303bSShuhei Matsumoto 			break;
40408926303bSShuhei Matsumoto 		case NVME_QPAIR_DISCONNECTED:
40418926303bSShuhei Matsumoto 		/* fallthrough */
404237c36ec1SKonrad Sztyber 		default:
404337c36ec1SKonrad Sztyber 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
404437c36ec1SKonrad Sztyber 			break;
404537c36ec1SKonrad Sztyber 		}
404637c36ec1SKonrad Sztyber 
404737c36ec1SKonrad Sztyber 		break;
404837c36ec1SKonrad Sztyber 
4049d6f6ffd2SJim Harris 	case NVME_CTRLR_STATE_READ_VS:
405009acc162SKonrad Sztyber 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS, NVME_TIMEOUT_INFINITE);
405109acc162SKonrad Sztyber 		rc = nvme_ctrlr_get_vs_async(ctrlr, nvme_ctrlr_process_init_vs_done, ctrlr);
4052f5ba8a5eSJim Harris 		break;
4053f5ba8a5eSJim Harris 
4054f5ba8a5eSJim Harris 	case NVME_CTRLR_STATE_READ_CAP:
40559d8251f6SKonrad Sztyber 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP, NVME_TIMEOUT_INFINITE);
40569d8251f6SKonrad Sztyber 		rc = nvme_ctrlr_get_cap_async(ctrlr, nvme_ctrlr_process_init_cap_done, ctrlr);
4057df01076fSJim Harris 		break;
4058df01076fSJim Harris 
4059df01076fSJim Harris 	case NVME_CTRLR_STATE_CHECK_EN:
406020abbe8aSDaniel Verkamp 		/* Begin the hardware initialization by making sure the controller is disabled. */
4061ccc084f3SKonrad Sztyber 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC, ready_timeout_in_ms);
4062ccc084f3SKonrad Sztyber 		rc = nvme_ctrlr_get_cc_async(ctrlr, nvme_ctrlr_process_init_check_en, ctrlr);
4063ccc084f3SKonrad Sztyber 		break;
4064fc8d8618SJim Harris 
4065fc8d8618SJim Harris 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
406620abbe8aSDaniel Verkamp 		/*
406720abbe8aSDaniel Verkamp 		 * Controller is currently enabled. We need to disable it to cause a reset.
406820abbe8aSDaniel Verkamp 		 *
406920abbe8aSDaniel Verkamp 		 * If CC.EN = 1 && CSTS.RDY = 0, the controller is in the process of becoming ready.
407020abbe8aSDaniel Verkamp 		 *  Wait for the ready bit to be 1 before disabling the controller.
407120abbe8aSDaniel Verkamp 		 */
40722f9c97a4SKonrad Sztyber 		nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
40738da3c166SKonrad Sztyber 					   NVME_TIMEOUT_KEEP_EXISTING);
40748da3c166SKonrad Sztyber 		rc = nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_process_init_wait_for_ready_1, ctrlr);
40758da3c166SKonrad Sztyber 		break;
407620abbe8aSDaniel Verkamp 
4077fc8d8618SJim Harris 	case NVME_CTRLR_STATE_SET_EN_0:
407801f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "Setting CC.EN = 0\n");
40795f376485SKonrad Sztyber 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC, ready_timeout_in_ms);
40805f376485SKonrad Sztyber 		rc = nvme_ctrlr_get_cc_async(ctrlr, nvme_ctrlr_process_init_set_en_0_read_cc, ctrlr);
40815f376485SKonrad Sztyber 		break;
408220abbe8aSDaniel Verkamp 
408320abbe8aSDaniel Verkamp 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
40842f9c97a4SKonrad Sztyber 		nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS,
40858da3c166SKonrad Sztyber 					   NVME_TIMEOUT_KEEP_EXISTING);
40868da3c166SKonrad Sztyber 		rc = nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_process_init_wait_for_ready_0, ctrlr);
408736a793adSBen Walker 		break;
408836a793adSBen Walker 
40895e5423deSShuhei Matsumoto 	case NVME_CTRLR_STATE_DISABLED:
4090bdc9fa83SShuhei Matsumoto 		if (ctrlr->is_disconnecting) {
4091bdc9fa83SShuhei Matsumoto 			NVME_CTRLR_DEBUGLOG(ctrlr, "Ctrlr was disabled.\n");
4092bdc9fa83SShuhei Matsumoto 		} else {
40935e5423deSShuhei Matsumoto 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE, ready_timeout_in_ms);
40945e5423deSShuhei Matsumoto 
40955e5423deSShuhei Matsumoto 			/*
40965e5423deSShuhei Matsumoto 			 * Delay 100us before setting CC.EN = 1.  Some NVMe SSDs miss CC.EN getting
40975e5423deSShuhei Matsumoto 			 *  set to 1 if it is too soon after CSTS.RDY is reported as 0.
40985e5423deSShuhei Matsumoto 			 */
40995e5423deSShuhei Matsumoto 			spdk_delay_us(100);
4100bdc9fa83SShuhei Matsumoto 		}
41015e5423deSShuhei Matsumoto 		break;
41025e5423deSShuhei Matsumoto 
410336a793adSBen Walker 	case NVME_CTRLR_STATE_ENABLE:
410401f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "Setting CC.EN = 1\n");
410573050d51SKonrad Sztyber 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC, ready_timeout_in_ms);
4106c65210d0SGangCao 		rc = nvme_ctrlr_enable(ctrlr);
4107a9a55513SKrzysztof Karas 		if (rc) {
4108a9a55513SKrzysztof Karas 			NVME_CTRLR_ERRLOG(ctrlr, "Ctrlr enable failed with error: %d", rc);
4109a9a55513SKrzysztof Karas 		}
4110c65210d0SGangCao 		return rc;
411120abbe8aSDaniel Verkamp 
411220abbe8aSDaniel Verkamp 	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
41132f9c97a4SKonrad Sztyber 		nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
41149e216680SKonrad Sztyber 					   NVME_TIMEOUT_KEEP_EXISTING);
41159e216680SKonrad Sztyber 		rc = nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_process_init_enable_wait_for_ready_1,
41169e216680SKonrad Sztyber 					       ctrlr);
411720abbe8aSDaniel Verkamp 		break;
411820abbe8aSDaniel Verkamp 
41195cd76349SSeth Howell 	case NVME_CTRLR_STATE_RESET_ADMIN_QUEUE:
41205cd76349SSeth Howell 		nvme_transport_qpair_reset(ctrlr->adminq);
41211c083e62SJim Harris 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY, NVME_TIMEOUT_INFINITE);
4122eb5cb3dbSChangpeng Liu 		break;
4123eb5cb3dbSChangpeng Liu 
4124eb5cb3dbSChangpeng Liu 	case NVME_CTRLR_STATE_IDENTIFY:
4125eb5cb3dbSChangpeng Liu 		rc = nvme_ctrlr_identify(ctrlr);
4126a61aff77SChangpeng Liu 		break;
4127a61aff77SChangpeng Liu 
41287e68d0baSJim Harris 	case NVME_CTRLR_STATE_CONFIGURE_AER:
41297e68d0baSJim Harris 		rc = nvme_ctrlr_configure_aer(ctrlr);
41307e68d0baSJim Harris 		break;
41317e68d0baSJim Harris 
41321c083e62SJim Harris 	case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
41331c083e62SJim Harris 		rc = nvme_ctrlr_set_keep_alive_timeout(ctrlr);
41341c083e62SJim Harris 		break;
41351c083e62SJim Harris 
413664563adaSNiklas Cassel 	case NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC:
413764563adaSNiklas Cassel 		rc = nvme_ctrlr_identify_iocs_specific(ctrlr);
413864563adaSNiklas Cassel 		break;
413964563adaSNiklas Cassel 
414038d59d8bSNiklas Cassel 	case NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG:
414138d59d8bSNiklas Cassel 		rc = nvme_ctrlr_get_zns_cmd_and_effects_log(ctrlr);
414238d59d8bSNiklas Cassel 		break;
414338d59d8bSNiklas Cassel 
41448b95dbabSChangpeng Liu 	case NVME_CTRLR_STATE_SET_NUM_QUEUES:
4145c3ba9127SAlexey Marchuk 		nvme_ctrlr_update_nvmf_ioccsz(ctrlr);
41468b95dbabSChangpeng Liu 		rc = nvme_ctrlr_set_num_queues(ctrlr);
414738a396d9SChangpeng Liu 		break;
414838a396d9SChangpeng Liu 
414965ff0771SEvgeniy Kochetov 	case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
415065ff0771SEvgeniy Kochetov 		_nvme_ctrlr_identify_active_ns(ctrlr);
415165ff0771SEvgeniy Kochetov 		break;
415265ff0771SEvgeniy Kochetov 
4153d9ecb572SChangpeng Liu 	case NVME_CTRLR_STATE_IDENTIFY_NS:
4154d9ecb572SChangpeng Liu 		rc = nvme_ctrlr_identify_namespaces(ctrlr);
415592bf76c9SChangpeng Liu 		break;
415692bf76c9SChangpeng Liu 
4157d9ecb572SChangpeng Liu 	case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
4158d9ecb572SChangpeng Liu 		rc = nvme_ctrlr_identify_id_desc_namespaces(ctrlr);
4159eb5cb3dbSChangpeng Liu 		break;
4160eb5cb3dbSChangpeng Liu 
4161c4d1b7d5SNiklas Cassel 	case NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC:
4162c4d1b7d5SNiklas Cassel 		rc = nvme_ctrlr_identify_namespaces_iocs_specific(ctrlr);
4163c4d1b7d5SNiklas Cassel 		break;
4164c4d1b7d5SNiklas Cassel 
4165eb5cb3dbSChangpeng Liu 	case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
4166eb5cb3dbSChangpeng Liu 		rc = nvme_ctrlr_set_supported_log_pages(ctrlr);
4167632c8d56SChangpeng Liu 		break;
4168632c8d56SChangpeng Liu 
4169632c8d56SChangpeng Liu 	case NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES:
4170632c8d56SChangpeng Liu 		rc = nvme_ctrlr_set_intel_support_log_pages(ctrlr);
4171eb5cb3dbSChangpeng Liu 		break;
4172eb5cb3dbSChangpeng Liu 
4173eb5cb3dbSChangpeng Liu 	case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
4174eb5cb3dbSChangpeng Liu 		nvme_ctrlr_set_supported_features(ctrlr);
417560ce1414SShuhei Matsumoto 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_FEATURE,
41762706cd42SChangpeng Liu 				     ctrlr->opts.admin_timeout_ms);
4177eb5cb3dbSChangpeng Liu 		break;
4178eb5cb3dbSChangpeng Liu 
417960ce1414SShuhei Matsumoto 	case NVME_CTRLR_STATE_SET_HOST_FEATURE:
418060ce1414SShuhei Matsumoto 		rc = nvme_ctrlr_set_host_feature(ctrlr);
418160ce1414SShuhei Matsumoto 		break;
418260ce1414SShuhei Matsumoto 
4183eb5cb3dbSChangpeng Liu 	case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
4184eb5cb3dbSChangpeng Liu 		rc = nvme_ctrlr_set_doorbell_buffer_config(ctrlr);
41855a028860SChangpeng Liu 		break;
41865a028860SChangpeng Liu 
4187eb5cb3dbSChangpeng Liu 	case NVME_CTRLR_STATE_SET_HOST_ID:
4188eb5cb3dbSChangpeng Liu 		rc = nvme_ctrlr_set_host_id(ctrlr);
418998b19709SChangpeng Liu 		break;
419098b19709SChangpeng Liu 
41913dd0bc9eSEvgeniy Kochetov 	case NVME_CTRLR_STATE_TRANSPORT_READY:
41923dd0bc9eSEvgeniy Kochetov 		rc = nvme_transport_ctrlr_ready(ctrlr);
41933dd0bc9eSEvgeniy Kochetov 		if (rc) {
41943dd0bc9eSEvgeniy Kochetov 			NVME_CTRLR_ERRLOG(ctrlr, "Transport controller ready step failed: rc %d\n", rc);
41953dd0bc9eSEvgeniy Kochetov 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
41963dd0bc9eSEvgeniy Kochetov 		} else {
41973dd0bc9eSEvgeniy Kochetov 			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
41983dd0bc9eSEvgeniy Kochetov 		}
41993dd0bc9eSEvgeniy Kochetov 		break;
42003dd0bc9eSEvgeniy Kochetov 
42016368d6c0SGangCao 	case NVME_CTRLR_STATE_READY:
420201f45ecdSGangCao 		NVME_CTRLR_DEBUGLOG(ctrlr, "Ctrlr already in ready state\n");
42036368d6c0SGangCao 		return 0;
42046368d6c0SGangCao 
42051c79fadbSChangpeng Liu 	case NVME_CTRLR_STATE_ERROR:
420601f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Ctrlr is in error state\n");
42071c79fadbSChangpeng Liu 		return -1;
42081c79fadbSChangpeng Liu 
420909acc162SKonrad Sztyber 	case NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS:
42109d8251f6SKonrad Sztyber 	case NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP:
4211ccc084f3SKonrad Sztyber 	case NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC:
42125f376485SKonrad Sztyber 	case NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC:
42138da3c166SKonrad Sztyber 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
42148da3c166SKonrad Sztyber 	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS:
421573050d51SKonrad Sztyber 	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC:
42169e216680SKonrad Sztyber 	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
4217d7345e08SNiklas Cassel 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
42187e68d0baSJim Harris 	case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
42191c083e62SJim Harris 	case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
4220d7345e08SNiklas Cassel 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC:
422138d59d8bSNiklas Cassel 	case NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG:
4222d7345e08SNiklas Cassel 	case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
4223d7345e08SNiklas Cassel 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS:
4224d7345e08SNiklas Cassel 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
4225d7345e08SNiklas Cassel 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
4226d7345e08SNiklas Cassel 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC:
4227632c8d56SChangpeng Liu 	case NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES:
422860ce1414SShuhei Matsumoto 	case NVME_CTRLR_STATE_WAIT_FOR_SET_HOST_FEATURE:
4229d7345e08SNiklas Cassel 	case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
4230d7345e08SNiklas Cassel 	case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
4231bdc9fa83SShuhei Matsumoto 		/*
4232bdc9fa83SShuhei Matsumoto 		 * nvme_ctrlr_process_init() may be called from the completion context
4233bdc9fa83SShuhei Matsumoto 		 * for the admin qpair. Avoid recursive calls for this case.
4234bdc9fa83SShuhei Matsumoto 		 */
4235bdc9fa83SShuhei Matsumoto 		if (!ctrlr->adminq->in_completion_context) {
4236d7345e08SNiklas Cassel 			spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
4237bdc9fa83SShuhei Matsumoto 		}
4238d7345e08SNiklas Cassel 		break;
4239d7345e08SNiklas Cassel 
424020abbe8aSDaniel Verkamp 	default:
42410606eaadSBen Walker 		assert(0);
42421010fb3aSDaniel Verkamp 		return -1;
42431010fb3aSDaniel Verkamp 	}
42441010fb3aSDaniel Verkamp 
4245a9a55513SKrzysztof Karas 	if (rc) {
4246cff39ee7SKonrad Sztyber 		NVME_CTRLR_ERRLOG(ctrlr, "Ctrlr operation failed with error: %d, ctrlr state: %d (%s)\n",
4247cff39ee7SKonrad Sztyber 				  rc, ctrlr->state, nvme_ctrlr_state_string(ctrlr->state));
4248a9a55513SKrzysztof Karas 	}
4249a9a55513SKrzysztof Karas 
42503c2190c2SJim Harris 	/* Note: we use the ticks captured when we entered this function.
42513c2190c2SJim Harris 	 * This covers environments where the SPDK process gets swapped out after
42523c2190c2SJim Harris 	 * we tried to advance the state but before we check the timeout here.
42533c2190c2SJim Harris 	 * It is not normal for this to happen, but harmless to handle it in this
42543c2190c2SJim Harris 	 * way.
42553c2190c2SJim Harris 	 */
425620abbe8aSDaniel Verkamp 	if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE &&
42573c2190c2SJim Harris 	    ticks > ctrlr->state_timeout_tsc) {
4258e91d7428SJim Harris 		NVME_CTRLR_ERRLOG(ctrlr, "Initialization timed out in state %d (%s)\n",
4259e91d7428SJim Harris 				  ctrlr->state, nvme_ctrlr_state_string(ctrlr->state));
426020abbe8aSDaniel Verkamp 		return -1;
426120abbe8aSDaniel Verkamp 	}
426220abbe8aSDaniel Verkamp 
42637c0e9faaSChangpeng Liu 	return rc;
42641010fb3aSDaniel Verkamp }
42651010fb3aSDaniel Verkamp 
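/*
 * Initialize a recursive mutex that can be shared across processes.
 * The robust and process-shared attributes are skipped on FreeBSD,
 * which does not support them.
 */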
42665ba51e50SGangCao int
426747341b89SGangCao nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx)
426888801428SBen Walker {
426988801428SBen Walker 	pthread_mutexattr_t attr;
427088801428SBen Walker 	int rc = 0;
427188801428SBen Walker 
427288801428SBen Walker 	if (pthread_mutexattr_init(&attr)) {
427388801428SBen Walker 		return -1;
427488801428SBen Walker 	}
427588801428SBen Walker 	if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) ||
4276bfc8bc87SGangCao #ifndef __FreeBSD__
427747341b89SGangCao 	    pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST) ||
42785ba51e50SGangCao 	    pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
4279bfc8bc87SGangCao #endif
428088801428SBen Walker 	    pthread_mutex_init(mtx, &attr)) {
428188801428SBen Walker 		rc = -1;
428288801428SBen Walker 	}
428388801428SBen Walker 	pthread_mutexattr_destroy(&attr);
428488801428SBen Walker 	return rc;
428588801428SBen Walker }
428688801428SBen Walker 
42871010fb3aSDaniel Verkamp int
428882395855SDaniel Verkamp nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
42891010fb3aSDaniel Verkamp {
429065155778SGangCao 	int rc;
42918bf37ee7SWenbo Wang 
4292951bb3a4SDarek Stojaczyk 	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
4293951bb3a4SDarek Stojaczyk 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT_DELAY, NVME_TIMEOUT_INFINITE);
4294951bb3a4SDarek Stojaczyk 	} else {
429520abbe8aSDaniel Verkamp 		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
4296951bb3a4SDarek Stojaczyk 	}
4297951bb3a4SDarek Stojaczyk 
4298daa8f941SJacek Kalwas 	if (ctrlr->opts.admin_queue_size > SPDK_NVME_ADMIN_QUEUE_MAX_ENTRIES) {
429901f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "admin_queue_size %u exceeds the max defined by the NVMe spec; using the max value\n",
4300daa8f941SJacek Kalwas 				  ctrlr->opts.admin_queue_size);
4301daa8f941SJacek Kalwas 		ctrlr->opts.admin_queue_size = SPDK_NVME_ADMIN_QUEUE_MAX_ENTRIES;
4302daa8f941SJacek Kalwas 	}
4303daa8f941SJacek Kalwas 
4304b90d7b5bSJim Harris 	if (ctrlr->quirks & NVME_QUIRK_MINIMUM_ADMIN_QUEUE_SIZE &&
4305b90d7b5bSJim Harris 	    (ctrlr->opts.admin_queue_size % SPDK_NVME_ADMIN_QUEUE_QUIRK_ENTRIES_MULTIPLE) != 0) {
4306b90d7b5bSJim Harris 		NVME_CTRLR_ERRLOG(ctrlr,
4307b90d7b5bSJim Harris 				  "admin_queue_size %u is invalid for this NVMe device; adjusting to the next multiple\n",
4308b90d7b5bSJim Harris 				  ctrlr->opts.admin_queue_size);
4309b90d7b5bSJim Harris 		ctrlr->opts.admin_queue_size = SPDK_ALIGN_CEIL(ctrlr->opts.admin_queue_size,
4310b90d7b5bSJim Harris 					       SPDK_NVME_ADMIN_QUEUE_QUIRK_ENTRIES_MULTIPLE);
4311b90d7b5bSJim Harris 	}
4312b90d7b5bSJim Harris 
4313daa8f941SJacek Kalwas 	if (ctrlr->opts.admin_queue_size < SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES) {
431401f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr,
431501f45ecdSGangCao 				  "admin_queue_size %u is less than the minimum defined by the NVMe spec; using the min value\n",
4316daa8f941SJacek Kalwas 				  ctrlr->opts.admin_queue_size);
4317daa8f941SJacek Kalwas 		ctrlr->opts.admin_queue_size = SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES;
4318daa8f941SJacek Kalwas 	}
4319daa8f941SJacek Kalwas 
4320ca3d1c5bSChangpeng Liu 	ctrlr->flags = 0;
4321c194ebd8SDaniel Verkamp 	ctrlr->free_io_qids = NULL;
4322bc185fe7SDaniel Verkamp 	ctrlr->is_resetting = false;
43231010fb3aSDaniel Verkamp 	ctrlr->is_failed = false;
432486c4d33fSChangpeng Liu 	ctrlr->is_destructed = false;
43251010fb3aSDaniel Verkamp 
43263272320cSDaniel Verkamp 	TAILQ_INIT(&ctrlr->active_io_qpairs);
4327193f4f83SBen Walker 	STAILQ_INIT(&ctrlr->queued_aborts);
4328193f4f83SBen Walker 	ctrlr->outstanding_aborts = 0;
43293272320cSDaniel Verkamp 
4330a0befabdSShuhei Matsumoto 	ctrlr->ana_log_page = NULL;
4331a0befabdSShuhei Matsumoto 	ctrlr->ana_log_page_size = 0;
4332a0befabdSShuhei Matsumoto 
433347341b89SGangCao 	rc = nvme_robust_mutex_init_recursive_shared(&ctrlr->ctrlr_lock);
433465155778SGangCao 	if (rc != 0) {
433565155778SGangCao 		return rc;
433665155778SGangCao 	}
43371010fb3aSDaniel Verkamp 
4338bfc8bc87SGangCao 	TAILQ_INIT(&ctrlr->active_procs);
43391bea8805SKonrad Sztyber 	STAILQ_INIT(&ctrlr->register_operations);
4340bfc8bc87SGangCao 
4341e7602c15SBen Walker 	RB_INIT(&ctrlr->ns);
4342e7602c15SBen Walker 
434365155778SGangCao 	return rc;
43441010fb3aSDaniel Verkamp }
43451010fb3aSDaniel Verkamp 
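/*
 * Derive controller limits from the CAP register: record WRR arbitration
 * support, compute the minimum page size from MPSMIN, and clamp the
 * requested I/O queue size to the range allowed by the spec and by
 * CAP.MQES (with a quirk override for devices that need a larger default).
 */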
4346f5ba8a5eSJim Harris static void
4347f5ba8a5eSJim Harris nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr)
4348f505f57bSDaniel Verkamp {
43496ad44e8bSChangpeng Liu 	if (ctrlr->cap.bits.ams & SPDK_NVME_CAP_AMS_WRR) {
43506ad44e8bSChangpeng Liu 		ctrlr->flags |= SPDK_NVME_CTRLR_WRR_SUPPORTED;
43516ad44e8bSChangpeng Liu 	}
43526ad44e8bSChangpeng Liu 
4353f505f57bSDaniel Verkamp 	ctrlr->min_page_size = 1u << (12 + ctrlr->cap.bits.mpsmin);
435498890613SDaniel Verkamp 
43552eec131eSDaniel Verkamp 	/* For now, always select page_size == min_page_size. */
43562eec131eSDaniel Verkamp 	ctrlr->page_size = ctrlr->min_page_size;
43572eec131eSDaniel Verkamp 
4358cb0538aeSDaniel Verkamp 	ctrlr->opts.io_queue_size = spdk_max(ctrlr->opts.io_queue_size, SPDK_NVME_IO_QUEUE_MIN_ENTRIES);
4359564db674SDariusz Stojaczyk 	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, MAX_IO_QUEUE_ENTRIES);
43606564bd94SJim Harris 	if (ctrlr->quirks & NVME_QUIRK_MINIMUM_IO_QUEUE_SIZE &&
43616564bd94SJim Harris 	    ctrlr->opts.io_queue_size == DEFAULT_IO_QUEUE_SIZE) {
43626564bd94SJim Harris 		/* If the user specifically set an IO queue size different than the
43636564bd94SJim Harris 		 * default, use that value.  Otherwise overwrite with the quirked value.
43646564bd94SJim Harris 		 * This allows this quirk to be overridden when necessary.
43656564bd94SJim Harris 		 * However, cap.mqes still needs to be respected.
43666564bd94SJim Harris 		 */
43676564bd94SJim Harris 		ctrlr->opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE_FOR_QUIRK;
43686564bd94SJim Harris 	}
436984d90484SDaniel Verkamp 	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
43705742e9b9SDaniel Verkamp 
43715742e9b9SDaniel Verkamp 	ctrlr->opts.io_queue_requests = spdk_max(ctrlr->opts.io_queue_requests, ctrlr->opts.io_queue_size);
4372f505f57bSDaniel Verkamp }
4373f505f57bSDaniel Verkamp 
43741010fb3aSDaniel Verkamp void
4375c83cd937SEhud Naim nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr)
4376c83cd937SEhud Naim {
43776b7c4ce1SJim Harris 	int rc;
43786b7c4ce1SJim Harris 
4379a51f4d49SJim Harris 	if (ctrlr->lock_depth > 0) {
4380a51f4d49SJim Harris 		SPDK_ERRLOG("lock currently held (depth=%d)!\n", ctrlr->lock_depth);
4381a51f4d49SJim Harris 		assert(false);
4382a51f4d49SJim Harris 	}
4383a51f4d49SJim Harris 
43846b7c4ce1SJim Harris 	rc = pthread_mutex_destroy(&ctrlr->ctrlr_lock);
43856b7c4ce1SJim Harris 	if (rc) {
43866b7c4ce1SJim Harris 		SPDK_ERRLOG("could not destroy ctrlr_lock: %s\n", spdk_strerror(rc));
43876b7c4ce1SJim Harris 		assert(false);
43886b7c4ce1SJim Harris 	}
43897bf78abbSMarcin Spiewak 
43907bf78abbSMarcin Spiewak 	nvme_ctrlr_free_processes(ctrlr);
4391c83cd937SEhud Naim }
4392c83cd937SEhud Naim 
4393c83cd937SEhud Naim void
43943806b2e1SShuhei Matsumoto nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr,
43953806b2e1SShuhei Matsumoto 			  struct nvme_ctrlr_detach_ctx *ctx)
43961010fb3aSDaniel Verkamp {
439750cc397dSDaniel Verkamp 	struct spdk_nvme_qpair *qpair, *tmp;
43981f19be65SDaniel Verkamp 
439901f45ecdSGangCao 	NVME_CTRLR_DEBUGLOG(ctrlr, "Preparing to destruct the controller\n");
4400f366e261SJim Harris 
440164454afbSShuhei Matsumoto 	ctrlr->prepare_for_reset = false;
440286c4d33fSChangpeng Liu 	ctrlr->is_destructed = true;
440386c4d33fSChangpeng Liu 
4404bad2c8e8SChangpeng Liu 	spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
44055322f307SShuhei Matsumoto 
44065322f307SShuhei Matsumoto 	nvme_ctrlr_abort_queued_aborts(ctrlr);
4407f366e261SJim Harris 	nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
4408f366e261SJim Harris 
440950cc397dSDaniel Verkamp 	TAILQ_FOREACH_SAFE(qpair, &ctrlr->active_io_qpairs, tailq, tmp) {
441084b7670dSGangCao 		spdk_nvme_ctrlr_free_io_qpair(qpair);
44111f19be65SDaniel Verkamp 	}
44121f19be65SDaniel Verkamp 
44137e3a11f9SChangpeng Liu 	nvme_ctrlr_free_doorbell_buffer(ctrlr);
441464563adaSNiklas Cassel 	nvme_ctrlr_free_iocs_specific_data(ctrlr);
44157e3a11f9SChangpeng Liu 
44163806b2e1SShuhei Matsumoto 	nvme_ctrlr_shutdown_async(ctrlr, ctx);
4417257fcb73SShuhei Matsumoto }
44183806b2e1SShuhei Matsumoto 
44193806b2e1SShuhei Matsumoto int
44203806b2e1SShuhei Matsumoto nvme_ctrlr_destruct_poll_async(struct spdk_nvme_ctrlr *ctrlr,
44213806b2e1SShuhei Matsumoto 			       struct nvme_ctrlr_detach_ctx *ctx)
44223806b2e1SShuhei Matsumoto {
4423dbde5eddSBen Walker 	struct spdk_nvme_ns *ns, *tmp_ns;
44243806b2e1SShuhei Matsumoto 	int rc = 0;
44253806b2e1SShuhei Matsumoto 
44263806b2e1SShuhei Matsumoto 	if (!ctx->shutdown_complete) {
44273806b2e1SShuhei Matsumoto 		rc = nvme_ctrlr_shutdown_poll_async(ctrlr, ctx);
44283806b2e1SShuhei Matsumoto 		if (rc == -EAGAIN) {
44293806b2e1SShuhei Matsumoto 			return -EAGAIN;
44303806b2e1SShuhei Matsumoto 		}
44313806b2e1SShuhei Matsumoto 		/* Destruct ctrlr forcefully for any other error. */
44322226750aSChangpeng Liu 	}
44331010fb3aSDaniel Verkamp 
4434ea1bfd84SShuhei Matsumoto 	if (ctx->cb_fn) {
4435ea1bfd84SShuhei Matsumoto 		ctx->cb_fn(ctrlr);
4436ea1bfd84SShuhei Matsumoto 	}
4437ea1bfd84SShuhei Matsumoto 
443880c88ab3SChangpeng Liu 	nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
443980c88ab3SChangpeng Liu 
4440dbde5eddSBen Walker 	RB_FOREACH_SAFE(ns, nvme_ns_tree, &ctrlr->ns, tmp_ns) {
4441dbde5eddSBen Walker 		nvme_ctrlr_destruct_namespace(ctrlr, ns->id);
4442dbde5eddSBen Walker 		RB_REMOVE(nvme_ns_tree, &ctrlr->ns, ns);
4443dbde5eddSBen Walker 		spdk_free(ns);
4444dbde5eddSBen Walker 	}
4445dbde5eddSBen Walker 
444684688fdbSBen Walker 	ctrlr->active_ns_count = 0;
44471010fb3aSDaniel Verkamp 
4448c194ebd8SDaniel Verkamp 	spdk_bit_array_free(&ctrlr->free_io_qids);
44491010fb3aSDaniel Verkamp 
445049e433f8SEvgeniy Kochetov 	free(ctrlr->ana_log_page);
4451a066f0c3SShuhei Matsumoto 	free(ctrlr->copied_ana_desc);
4452a0befabdSShuhei Matsumoto 	ctrlr->ana_log_page = NULL;
4453a066f0c3SShuhei Matsumoto 	ctrlr->copied_ana_desc = NULL;
4454a0befabdSShuhei Matsumoto 	ctrlr->ana_log_page_size = 0;
4455a0befabdSShuhei Matsumoto 
44561ffec5d5SDaniel Verkamp 	nvme_transport_ctrlr_destruct(ctrlr);
44573806b2e1SShuhei Matsumoto 
44583806b2e1SShuhei Matsumoto 	return rc;
44593806b2e1SShuhei Matsumoto }
44603806b2e1SShuhei Matsumoto 
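/*
 * Synchronous destruct: start the asynchronous teardown, then poll until
 * the controller shutdown finishes with something other than -EAGAIN.
 */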
44613806b2e1SShuhei Matsumoto void
44623806b2e1SShuhei Matsumoto nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
44633806b2e1SShuhei Matsumoto {
4464b6ecc372SKonrad Sztyber 	struct nvme_ctrlr_detach_ctx ctx = { .ctrlr = ctrlr };
44653806b2e1SShuhei Matsumoto 	int rc;
44663806b2e1SShuhei Matsumoto 
44673806b2e1SShuhei Matsumoto 	nvme_ctrlr_destruct_async(ctrlr, &ctx);
44683806b2e1SShuhei Matsumoto 
44693806b2e1SShuhei Matsumoto 	while (1) {
44703806b2e1SShuhei Matsumoto 		rc = nvme_ctrlr_destruct_poll_async(ctrlr, &ctx);
44713806b2e1SShuhei Matsumoto 		if (rc != -EAGAIN) {
44723806b2e1SShuhei Matsumoto 			break;
44733806b2e1SShuhei Matsumoto 		}
44743806b2e1SShuhei Matsumoto 		nvme_delay(1000);
44753806b2e1SShuhei Matsumoto 	}
44761010fb3aSDaniel Verkamp }
44771010fb3aSDaniel Verkamp 
4478eb555b13SDaniel Verkamp int
44796ce73aa6SDaniel Verkamp nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
44801010fb3aSDaniel Verkamp 				struct nvme_request *req)
44811010fb3aSDaniel Verkamp {
4482a987bd16SDaniel Verkamp 	return nvme_qpair_submit_request(ctrlr->adminq, req);
44831010fb3aSDaniel Verkamp }
44841010fb3aSDaniel Verkamp 
448593de96b4SDaniel Verkamp static void
448693de96b4SDaniel Verkamp nvme_keep_alive_completion(void *cb_ctx, const struct spdk_nvme_cpl *cpl)
448793de96b4SDaniel Verkamp {
448893de96b4SDaniel Verkamp 	/* Do nothing */
448993de96b4SDaniel Verkamp }
449093de96b4SDaniel Verkamp 
449193de96b4SDaniel Verkamp /*
449293de96b4SDaniel Verkamp  * Check if we need to send a Keep Alive command.
449393de96b4SDaniel Verkamp  * Caller must hold ctrlr->ctrlr_lock.
449493de96b4SDaniel Verkamp  */
44952031f8f7SZiye Yang static int
449693de96b4SDaniel Verkamp nvme_ctrlr_keep_alive(struct spdk_nvme_ctrlr *ctrlr)
449793de96b4SDaniel Verkamp {
449893de96b4SDaniel Verkamp 	uint64_t now;
449993de96b4SDaniel Verkamp 	struct nvme_request *req;
450093de96b4SDaniel Verkamp 	struct spdk_nvme_cmd *cmd;
45012031f8f7SZiye Yang 	int rc = 0;
450293de96b4SDaniel Verkamp 
450393de96b4SDaniel Verkamp 	now = spdk_get_ticks();
450493de96b4SDaniel Verkamp 	if (now < ctrlr->next_keep_alive_tick) {
45052031f8f7SZiye Yang 		return rc;
450693de96b4SDaniel Verkamp 	}
450793de96b4SDaniel Verkamp 
4508cd13f280SDaniel Verkamp 	req = nvme_allocate_request_null(ctrlr->adminq, nvme_keep_alive_completion, NULL);
450993de96b4SDaniel Verkamp 	if (req == NULL) {
45102031f8f7SZiye Yang 		return rc;
451193de96b4SDaniel Verkamp 	}
451293de96b4SDaniel Verkamp 
451393de96b4SDaniel Verkamp 	cmd = &req->cmd;
451493de96b4SDaniel Verkamp 	cmd->opc = SPDK_NVME_OPC_KEEP_ALIVE;
451593de96b4SDaniel Verkamp 
451693de96b4SDaniel Verkamp 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
451793de96b4SDaniel Verkamp 	if (rc != 0) {
451801f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Submitting Keep Alive failed\n");
45192031f8f7SZiye Yang 		rc = -ENXIO;
452093de96b4SDaniel Verkamp 	}
452193de96b4SDaniel Verkamp 
452293de96b4SDaniel Verkamp 	ctrlr->next_keep_alive_tick = now + ctrlr->keep_alive_interval_ticks;
45232031f8f7SZiye Yang 	return rc;
452493de96b4SDaniel Verkamp }
452593de96b4SDaniel Verkamp 
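/*
 * Poll the admin path under the controller lock: send a Keep Alive if one
 * is due, process queued I/O messages, reap admin queue completions, and
 * deliver any queued asynchronous events to the calling process.
 */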
45262ced60e9SDaniel Verkamp int32_t
45276ce73aa6SDaniel Verkamp spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
45281010fb3aSDaniel Verkamp {
45292ced60e9SDaniel Verkamp 	int32_t num_completions;
453011739f3cSBen Walker 	int32_t rc;
45314ac203b2SCurt Bruns 	struct spdk_nvme_ctrlr_process	*active_proc;
45322ced60e9SDaniel Verkamp 
4533e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
453411739f3cSBen Walker 
453593de96b4SDaniel Verkamp 	if (ctrlr->keep_alive_interval_ticks) {
45362031f8f7SZiye Yang 		rc = nvme_ctrlr_keep_alive(ctrlr);
45372031f8f7SZiye Yang 		if (rc) {
4538e10b4806SJim Harris 			nvme_ctrlr_unlock(ctrlr);
45392031f8f7SZiye Yang 			return rc;
45402031f8f7SZiye Yang 		}
454193de96b4SDaniel Verkamp 	}
454211739f3cSBen Walker 
4543a3f72b2eSSeth Howell 	rc = nvme_io_msg_process(ctrlr);
454411739f3cSBen Walker 	if (rc < 0) {
4545e10b4806SJim Harris 		nvme_ctrlr_unlock(ctrlr);
454611739f3cSBen Walker 		return rc;
454711739f3cSBen Walker 	}
454811739f3cSBen Walker 	num_completions = rc;
454911739f3cSBen Walker 
455011739f3cSBen Walker 	rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
4551a2a82087Sbalaji 
45524ac203b2SCurt Bruns 	/* Each process has its own async event list; complete the events queued for this process. */
45534ac203b2SCurt Bruns 	active_proc = nvme_ctrlr_get_current_process(ctrlr);
45544ac203b2SCurt Bruns 	if (active_proc) {
4555a2a82087Sbalaji 		nvme_ctrlr_complete_queued_async_events(ctrlr);
45564ac203b2SCurt Bruns 	}
4557a2a82087Sbalaji 
4558df7c2a22SShuhei Matsumoto 	if (rc == -ENXIO && ctrlr->is_disconnecting) {
4559df7c2a22SShuhei Matsumoto 		nvme_ctrlr_disconnect_done(ctrlr);
4560df7c2a22SShuhei Matsumoto 	}
4561df7c2a22SShuhei Matsumoto 
4562e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
456311739f3cSBen Walker 
456411739f3cSBen Walker 	if (rc < 0) {
456511739f3cSBen Walker 		num_completions = rc;
456611739f3cSBen Walker 	} else {
456711739f3cSBen Walker 		num_completions += rc;
456811739f3cSBen Walker 	}
45692ced60e9SDaniel Verkamp 
45702ced60e9SDaniel Verkamp 	return num_completions;
45711010fb3aSDaniel Verkamp }
45721010fb3aSDaniel Verkamp 
4573ad35d6cdSDaniel Verkamp const struct spdk_nvme_ctrlr_data *
45746ce73aa6SDaniel Verkamp spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
45751010fb3aSDaniel Verkamp {
45761010fb3aSDaniel Verkamp 	return &ctrlr->cdata;
45771010fb3aSDaniel Verkamp }
45781010fb3aSDaniel Verkamp 
4579b28125b9STsuyoshi Uchida union spdk_nvme_csts_register spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
4580b28125b9STsuyoshi Uchida {
4581b28125b9STsuyoshi Uchida 	union spdk_nvme_csts_register csts;
4582b28125b9STsuyoshi Uchida 
4583b28125b9STsuyoshi Uchida 	if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
45847fc48a5fSMichael Haeuptle 		csts.raw = SPDK_NVME_INVALID_REGISTER_VALUE;
4585b28125b9STsuyoshi Uchida 	}
4586b28125b9STsuyoshi Uchida 	return csts;
4587b28125b9STsuyoshi Uchida }
4588b28125b9STsuyoshi Uchida 
4589296ae15fSTomasz Bielecki union spdk_nvme_cc_register spdk_nvme_ctrlr_get_regs_cc(struct spdk_nvme_ctrlr *ctrlr)
4590296ae15fSTomasz Bielecki {
4591296ae15fSTomasz Bielecki 	union spdk_nvme_cc_register cc;
4592296ae15fSTomasz Bielecki 
4593296ae15fSTomasz Bielecki 	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
4594296ae15fSTomasz Bielecki 		cc.raw = SPDK_NVME_INVALID_REGISTER_VALUE;
4595296ae15fSTomasz Bielecki 	}
4596296ae15fSTomasz Bielecki 	return cc;
4597296ae15fSTomasz Bielecki }
4598296ae15fSTomasz Bielecki 
4599a464f139SDaniel Verkamp union spdk_nvme_cap_register spdk_nvme_ctrlr_get_regs_cap(struct spdk_nvme_ctrlr *ctrlr)
4600a464f139SDaniel Verkamp {
460176469b2cSDaniel Verkamp 	return ctrlr->cap;
4602a464f139SDaniel Verkamp }
4603a464f139SDaniel Verkamp 
4604a464f139SDaniel Verkamp union spdk_nvme_vs_register spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
4605a464f139SDaniel Verkamp {
4606f7b58aeaSDaniel Verkamp 	return ctrlr->vs;
4607a464f139SDaniel Verkamp }
4608a464f139SDaniel Verkamp 
4609993c4a07SBen Walker union spdk_nvme_cmbsz_register spdk_nvme_ctrlr_get_regs_cmbsz(struct spdk_nvme_ctrlr *ctrlr)
4610993c4a07SBen Walker {
4611993c4a07SBen Walker 	union spdk_nvme_cmbsz_register cmbsz;
4612993c4a07SBen Walker 
4613993c4a07SBen Walker 	if (nvme_ctrlr_get_cmbsz(ctrlr, &cmbsz)) {
4614993c4a07SBen Walker 		cmbsz.raw = 0;
4615993c4a07SBen Walker 	}
4616993c4a07SBen Walker 
4617993c4a07SBen Walker 	return cmbsz;
4618993c4a07SBen Walker }
4619993c4a07SBen Walker 
462048aed8a5SKrishna Kanth Reddy union spdk_nvme_pmrcap_register spdk_nvme_ctrlr_get_regs_pmrcap(struct spdk_nvme_ctrlr *ctrlr)
462148aed8a5SKrishna Kanth Reddy {
462248aed8a5SKrishna Kanth Reddy 	union spdk_nvme_pmrcap_register pmrcap;
462348aed8a5SKrishna Kanth Reddy 
462448aed8a5SKrishna Kanth Reddy 	if (nvme_ctrlr_get_pmrcap(ctrlr, &pmrcap)) {
462548aed8a5SKrishna Kanth Reddy 		pmrcap.raw = 0;
462648aed8a5SKrishna Kanth Reddy 	}
462748aed8a5SKrishna Kanth Reddy 
462848aed8a5SKrishna Kanth Reddy 	return pmrcap;
462948aed8a5SKrishna Kanth Reddy }
463048aed8a5SKrishna Kanth Reddy 
4631fec55c84SKrishna Kanth Reddy union spdk_nvme_bpinfo_register spdk_nvme_ctrlr_get_regs_bpinfo(struct spdk_nvme_ctrlr *ctrlr)
4632fec55c84SKrishna Kanth Reddy {
4633fec55c84SKrishna Kanth Reddy 	union spdk_nvme_bpinfo_register bpinfo;
4634fec55c84SKrishna Kanth Reddy 
4635fec55c84SKrishna Kanth Reddy 	if (nvme_ctrlr_get_bpinfo(ctrlr, &bpinfo)) {
4636fec55c84SKrishna Kanth Reddy 		bpinfo.raw = 0;
4637fec55c84SKrishna Kanth Reddy 	}
4638fec55c84SKrishna Kanth Reddy 
4639fec55c84SKrishna Kanth Reddy 	return bpinfo;
4640fec55c84SKrishna Kanth Reddy }
4641fec55c84SKrishna Kanth Reddy 
4642f98b792dSKrishna Kanth Reddy uint64_t
4643f98b792dSKrishna Kanth Reddy spdk_nvme_ctrlr_get_pmrsz(struct spdk_nvme_ctrlr *ctrlr)
4644f98b792dSKrishna Kanth Reddy {
4645f98b792dSKrishna Kanth Reddy 	return ctrlr->pmr_size;
4646f98b792dSKrishna Kanth Reddy }
4647f98b792dSKrishna Kanth Reddy 
46481010fb3aSDaniel Verkamp uint32_t
46496ce73aa6SDaniel Verkamp spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
46501010fb3aSDaniel Verkamp {
4651517b5572SBen Walker 	return ctrlr->cdata.nn;
465206fbf4b3SEd Rodriguez }
465306fbf4b3SEd Rodriguez 
465406fbf4b3SEd Rodriguez bool
465506fbf4b3SEd Rodriguez spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
465606fbf4b3SEd Rodriguez {
4657517b5572SBen Walker 	struct spdk_nvme_ns tmp, *ns;
4658517b5572SBen Walker 
4659517b5572SBen Walker 	tmp.id = nsid;
4660517b5572SBen Walker 	ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
4661517b5572SBen Walker 
4662517b5572SBen Walker 	if (ns != NULL) {
4663517b5572SBen Walker 		return ns->active;
4664517b5572SBen Walker 	}
4665517b5572SBen Walker 
4666517b5572SBen Walker 	return false;
466706fbf4b3SEd Rodriguez }
466806fbf4b3SEd Rodriguez 
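/*
 * Namespaces are kept in an RB tree keyed by ID; walk it in ascending
 * order and return the ID of the first namespace marked active.
 */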
466906fbf4b3SEd Rodriguez uint32_t
467006fbf4b3SEd Rodriguez spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
467106fbf4b3SEd Rodriguez {
4672517b5572SBen Walker 	struct spdk_nvme_ns *ns;
4673517b5572SBen Walker 
4674517b5572SBen Walker 	ns = RB_MIN(nvme_ns_tree, &ctrlr->ns);
4675517b5572SBen Walker 	if (ns == NULL) {
4676517b5572SBen Walker 		return 0;
4677517b5572SBen Walker 	}
4678517b5572SBen Walker 
4679517b5572SBen Walker 	while (ns != NULL) {
4680517b5572SBen Walker 		if (ns->active) {
4681517b5572SBen Walker 			return ns->id;
4682517b5572SBen Walker 		}
4683517b5572SBen Walker 
4684517b5572SBen Walker 		ns = RB_NEXT(nvme_ns_tree, &ctrlr->ns, ns);
4685517b5572SBen Walker 	}
4686517b5572SBen Walker 
4687517b5572SBen Walker 	return 0;
468806fbf4b3SEd Rodriguez }
468906fbf4b3SEd Rodriguez 
469006fbf4b3SEd Rodriguez uint32_t
469106fbf4b3SEd Rodriguez spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
469206fbf4b3SEd Rodriguez {
4693517b5572SBen Walker 	struct spdk_nvme_ns tmp, *ns;
4694517b5572SBen Walker 
4695517b5572SBen Walker 	tmp.id = prev_nsid;
4696517b5572SBen Walker 	ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
4697517b5572SBen Walker 	if (ns == NULL) {
4698517b5572SBen Walker 		return 0;
469906fbf4b3SEd Rodriguez 	}
4700517b5572SBen Walker 
4701517b5572SBen Walker 	ns = RB_NEXT(nvme_ns_tree, &ctrlr->ns, ns);
4702517b5572SBen Walker 	while (ns != NULL) {
4703517b5572SBen Walker 		if (ns->active) {
4704517b5572SBen Walker 			return ns->id;
4705517b5572SBen Walker 		}
4706517b5572SBen Walker 
4707517b5572SBen Walker 		ns = RB_NEXT(nvme_ns_tree, &ctrlr->ns, ns);
4708517b5572SBen Walker 	}
4709517b5572SBen Walker 
471006fbf4b3SEd Rodriguez 	return 0;
471106fbf4b3SEd Rodriguez }
471206fbf4b3SEd Rodriguez 
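/*
 * Namespace objects are created lazily: look up the ID in the RB tree and,
 * if it is not present yet, allocate a shared-memory entry and insert it
 * before returning.
 */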
471306fbf4b3SEd Rodriguez struct spdk_nvme_ns *
471406fbf4b3SEd Rodriguez spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
471506fbf4b3SEd Rodriguez {
4716e7602c15SBen Walker 	struct spdk_nvme_ns tmp;
4717b4dace73SBen Walker 	struct spdk_nvme_ns *ns;
4718b4dace73SBen Walker 
4719517b5572SBen Walker 	if (nsid < 1 || nsid > ctrlr->cdata.nn) {
47201010fb3aSDaniel Verkamp 		return NULL;
47211010fb3aSDaniel Verkamp 	}
47221010fb3aSDaniel Verkamp 
4723e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
4724b4dace73SBen Walker 
4725e7602c15SBen Walker 	tmp.id = nsid;
4726e7602c15SBen Walker 	ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
4727b4dace73SBen Walker 
4728b4dace73SBen Walker 	if (ns == NULL) {
4729186b109dSJim Harris 		ns = spdk_zmalloc(sizeof(struct spdk_nvme_ns), 64, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
4730b4dace73SBen Walker 		if (ns == NULL) {
4731e10b4806SJim Harris 			nvme_ctrlr_unlock(ctrlr);
4732b4dace73SBen Walker 			return NULL;
4733b4dace73SBen Walker 		}
4734b4dace73SBen Walker 
4735b4dace73SBen Walker 		NVME_CTRLR_DEBUGLOG(ctrlr, "Namespace %u was added\n", nsid);
4736e7602c15SBen Walker 		ns->id = nsid;
4737e7602c15SBen Walker 		RB_INSERT(nvme_ns_tree, &ctrlr->ns, ns);
4738b4dace73SBen Walker 	}
4739b4dace73SBen Walker 
4740e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
4741b4dace73SBen Walker 
4742b4dace73SBen Walker 	return ns;
47431010fb3aSDaniel Verkamp }
47441010fb3aSDaniel Verkamp 
4745a78ddd82SDaniel Verkamp struct spdk_pci_device *
4746a78ddd82SDaniel Verkamp spdk_nvme_ctrlr_get_pci_device(struct spdk_nvme_ctrlr *ctrlr)
4747a78ddd82SDaniel Verkamp {
4748a78ddd82SDaniel Verkamp 	if (ctrlr == NULL) {
4749a78ddd82SDaniel Verkamp 		return NULL;
4750a78ddd82SDaniel Verkamp 	}
4751a78ddd82SDaniel Verkamp 
4752a78ddd82SDaniel Verkamp 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
4753a78ddd82SDaniel Verkamp 		return NULL;
4754a78ddd82SDaniel Verkamp 	}
4755a78ddd82SDaniel Verkamp 
4756a78ddd82SDaniel Verkamp 	return nvme_ctrlr_proc_get_devhandle(ctrlr);
4757a78ddd82SDaniel Verkamp }
4758a78ddd82SDaniel Verkamp 
475985be1014SJim Harris int32_t
476085be1014SJim Harris spdk_nvme_ctrlr_get_numa_id(struct spdk_nvme_ctrlr *ctrlr)
476185be1014SJim Harris {
476285be1014SJim Harris 	if (ctrlr->numa.id_valid) {
476385be1014SJim Harris 		return ctrlr->numa.id;
476485be1014SJim Harris 	} else {
476585be1014SJim Harris 		return SPDK_ENV_NUMA_ID_ANY;
476685be1014SJim Harris 	}
476785be1014SJim Harris }
476885be1014SJim Harris 
47692076ab31SShuhei Matsumoto uint16_t
47702076ab31SShuhei Matsumoto spdk_nvme_ctrlr_get_id(struct spdk_nvme_ctrlr *ctrlr)
47712076ab31SShuhei Matsumoto {
47722076ab31SShuhei Matsumoto 	return ctrlr->cntlid;
47732076ab31SShuhei Matsumoto }
47742076ab31SShuhei Matsumoto 
4775ace321dfSDaniel Verkamp uint32_t
4776ace321dfSDaniel Verkamp spdk_nvme_ctrlr_get_max_xfer_size(const struct spdk_nvme_ctrlr *ctrlr)
4777ace321dfSDaniel Verkamp {
4778ace321dfSDaniel Verkamp 	return ctrlr->max_xfer_size;
4779ace321dfSDaniel Verkamp }
4780ace321dfSDaniel Verkamp 
47816f3e2778SJim Harris uint16_t
47826f3e2778SJim Harris spdk_nvme_ctrlr_get_max_sges(const struct spdk_nvme_ctrlr *ctrlr)
47836f3e2778SJim Harris {
47846f3e2778SJim Harris 	if (ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
47856f3e2778SJim Harris 		return ctrlr->max_sges;
47866f3e2778SJim Harris 	} else {
47876f3e2778SJim Harris 		return UINT16_MAX;
47886f3e2778SJim Harris 	}
47896f3e2778SJim Harris }
47906f3e2778SJim Harris 
47911010fb3aSDaniel Verkamp void
47926ce73aa6SDaniel Verkamp spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
47936ce73aa6SDaniel Verkamp 				      spdk_nvme_aer_cb aer_cb_fn,
47941010fb3aSDaniel Verkamp 				      void *aer_cb_arg)
47951010fb3aSDaniel Verkamp {
4796955b295aSDaniel Verkamp 	struct spdk_nvme_ctrlr_process *active_proc;
4797955b295aSDaniel Verkamp 
4798e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
4799955b295aSDaniel Verkamp 
48001a9c19a9SSeth Howell 	active_proc = nvme_ctrlr_get_current_process(ctrlr);
4801955b295aSDaniel Verkamp 	if (active_proc) {
4802955b295aSDaniel Verkamp 		active_proc->aer_cb_fn = aer_cb_fn;
4803955b295aSDaniel Verkamp 		active_proc->aer_cb_arg = aer_cb_arg;
4804955b295aSDaniel Verkamp 	}
4805955b295aSDaniel Verkamp 
4806e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
48071010fb3aSDaniel Verkamp }
48089945c00cSCunyin Chang 
48097c60aec0SIsaac Otsiabah void
48104300c621SJim Harris spdk_nvme_ctrlr_disable_read_changed_ns_list_log_page(struct spdk_nvme_ctrlr *ctrlr)
48114300c621SJim Harris {
48124300c621SJim Harris 	ctrlr->opts.disable_read_changed_ns_list_log_page = true;
48134300c621SJim Harris }
48144300c621SJim Harris 
48154300c621SJim Harris void
48167c60aec0SIsaac Otsiabah spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,
48175288c4dfSMatt Dumm 		uint64_t timeout_io_us, uint64_t timeout_admin_us,
48185288c4dfSMatt Dumm 		spdk_nvme_timeout_cb cb_fn, void *cb_arg)
48197c60aec0SIsaac Otsiabah {
4820cbd9c241SDaniel Verkamp 	struct spdk_nvme_ctrlr_process	*active_proc;
482131bf5d79SZiye Yang 
4822e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
4823943c7c69SDaniel Verkamp 
48241a9c19a9SSeth Howell 	active_proc = nvme_ctrlr_get_current_process(ctrlr);
4825cbd9c241SDaniel Verkamp 	if (active_proc) {
48265288c4dfSMatt Dumm 		active_proc->timeout_io_ticks = timeout_io_us * spdk_get_ticks_hz() / 1000000ULL;
48275288c4dfSMatt Dumm 		active_proc->timeout_admin_ticks = timeout_admin_us * spdk_get_ticks_hz() / 1000000ULL;
482831bf5d79SZiye Yang 		active_proc->timeout_cb_fn = cb_fn;
482931bf5d79SZiye Yang 		active_proc->timeout_cb_arg = cb_arg;
48307c60aec0SIsaac Otsiabah 	}
4831943c7c69SDaniel Verkamp 
48321681a055SDaniel Verkamp 	ctrlr->timeout_enabled = true;
48331681a055SDaniel Verkamp 
4834e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
4835cbd9c241SDaniel Verkamp }
48367c60aec0SIsaac Otsiabah 
48379945c00cSCunyin Chang bool
48386ce73aa6SDaniel Verkamp spdk_nvme_ctrlr_is_log_page_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page)
48399945c00cSCunyin Chang {
484097601bb3SDaniel Verkamp 	/* No bounds check necessary, since log_page is uint8_t and log_page_supported has 256 entries */
484197601bb3SDaniel Verkamp 	SPDK_STATIC_ASSERT(sizeof(ctrlr->log_page_supported) == 256, "log_page_supported size mismatch");
484297601bb3SDaniel Verkamp 	return ctrlr->log_page_supported[log_page];
48439945c00cSCunyin Chang }
484416eee6e2SCunyin Chang 
484516eee6e2SCunyin Chang bool
48466ce73aa6SDaniel Verkamp spdk_nvme_ctrlr_is_feature_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature_code)
484716eee6e2SCunyin Chang {
484816eee6e2SCunyin Chang 	/* No bounds check necessary, since feature_code is uint8_t and feature_supported has 256 entries */
484916eee6e2SCunyin Chang 	SPDK_STATIC_ASSERT(sizeof(ctrlr->feature_supported) == 256, "feature_supported size mismatch");
485016eee6e2SCunyin Chang 	return ctrlr->feature_supported[feature_code];
485116eee6e2SCunyin Chang }
485298416108SCunyin Chang 
485398416108SCunyin Chang int
485498416108SCunyin Chang spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
485598416108SCunyin Chang 			  struct spdk_nvme_ctrlr_list *payload)
485698416108SCunyin Chang {
48578818ace2SAlexey Marchuk 	struct nvme_completion_poll_status	*status;
4858b4dace73SBen Walker 	struct spdk_nvme_ns			*ns;
485998416108SCunyin Chang 	int					res;
486098416108SCunyin Chang 
48613067ecaaSMao Jiang 	if (nsid == 0) {
48623067ecaaSMao Jiang 		return -EINVAL;
48633067ecaaSMao Jiang 	}
48643067ecaaSMao Jiang 
486524d61956SAlexey Marchuk 	status = calloc(1, sizeof(*status));
48668818ace2SAlexey Marchuk 	if (!status) {
486701f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
48688818ace2SAlexey Marchuk 		return -ENOMEM;
48698818ace2SAlexey Marchuk 	}
48708818ace2SAlexey Marchuk 
487198416108SCunyin Chang 	res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload,
48728818ace2SAlexey Marchuk 				       nvme_completion_poll_cb, status);
487359970a89SDaniel Verkamp 	if (res) {
48748818ace2SAlexey Marchuk 		free(status);
487598416108SCunyin Chang 		return res;
487659970a89SDaniel Verkamp 	}
48771a9c19a9SSeth Howell 	if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
487801f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_attach_ns failed!\n");
48798818ace2SAlexey Marchuk 		if (!status->timed_out) {
48808818ace2SAlexey Marchuk 			free(status);
48818818ace2SAlexey Marchuk 		}
4882f0b20026SMinfei Huang 		return -ENXIO;
488398416108SCunyin Chang 	}
48848818ace2SAlexey Marchuk 	free(status);
488598416108SCunyin Chang 
48864c4c8ca2SChangpeng Liu 	res = nvme_ctrlr_identify_active_ns(ctrlr);
48874c4c8ca2SChangpeng Liu 	if (res) {
48884c4c8ca2SChangpeng Liu 		return res;
48894c4c8ca2SChangpeng Liu 	}
48904c4c8ca2SChangpeng Liu 
4891b4dace73SBen Walker 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
489246d02f3eSGangCao 	if (ns == NULL) {
489346d02f3eSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_get_ns failed!\n");
489446d02f3eSGangCao 		return -ENXIO;
489546d02f3eSGangCao 	}
489646d02f3eSGangCao 
4897b4dace73SBen Walker 	return nvme_ns_construct(ns, nsid, ctrlr);
489898416108SCunyin Chang }
489998416108SCunyin Chang 
490098416108SCunyin Chang int
490198416108SCunyin Chang spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
490298416108SCunyin Chang 			  struct spdk_nvme_ctrlr_list *payload)
490398416108SCunyin Chang {
49048818ace2SAlexey Marchuk 	struct nvme_completion_poll_status	*status;
490598416108SCunyin Chang 	int					res;
490698416108SCunyin Chang 
49073067ecaaSMao Jiang 	if (nsid == 0) {
49083067ecaaSMao Jiang 		return -EINVAL;
49093067ecaaSMao Jiang 	}
49103067ecaaSMao Jiang 
491124d61956SAlexey Marchuk 	status = calloc(1, sizeof(*status));
49128818ace2SAlexey Marchuk 	if (!status) {
491301f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
49148818ace2SAlexey Marchuk 		return -ENOMEM;
49158818ace2SAlexey Marchuk 	}
49168818ace2SAlexey Marchuk 
491798416108SCunyin Chang 	res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload,
49188818ace2SAlexey Marchuk 				       nvme_completion_poll_cb, status);
491959970a89SDaniel Verkamp 	if (res) {
49208818ace2SAlexey Marchuk 		free(status);
492198416108SCunyin Chang 		return res;
492259970a89SDaniel Verkamp 	}
49231a9c19a9SSeth Howell 	if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
492401f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_detach_ns failed!\n");
49258818ace2SAlexey Marchuk 		if (!status->timed_out) {
49268818ace2SAlexey Marchuk 			free(status);
49278818ace2SAlexey Marchuk 		}
4928f0b20026SMinfei Huang 		return -ENXIO;
492998416108SCunyin Chang 	}
49308818ace2SAlexey Marchuk 	free(status);
493198416108SCunyin Chang 
4932517b5572SBen Walker 	return nvme_ctrlr_identify_active_ns(ctrlr);
493398416108SCunyin Chang }
493498416108SCunyin Chang 
49354957d264SDaniel Verkamp uint32_t
493698416108SCunyin Chang spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload)
493798416108SCunyin Chang {
49388818ace2SAlexey Marchuk 	struct nvme_completion_poll_status	*status;
493998416108SCunyin Chang 	int					res;
49404c4c8ca2SChangpeng Liu 	uint32_t				nsid;
494198416108SCunyin Chang 
494224d61956SAlexey Marchuk 	status = calloc(1, sizeof(*status));
49438818ace2SAlexey Marchuk 	if (!status) {
494401f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
4945a8430987SAlexey Marchuk 		return 0;
49468818ace2SAlexey Marchuk 	}
49478818ace2SAlexey Marchuk 
49488818ace2SAlexey Marchuk 	res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, status);
494959970a89SDaniel Verkamp 	if (res) {
49508818ace2SAlexey Marchuk 		free(status);
49514957d264SDaniel Verkamp 		return 0;
495259970a89SDaniel Verkamp 	}
49531a9c19a9SSeth Howell 	if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
495401f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_create_ns failed!\n");
49558818ace2SAlexey Marchuk 		if (!status->timed_out) {
49568818ace2SAlexey Marchuk 			free(status);
49578818ace2SAlexey Marchuk 		}
49584957d264SDaniel Verkamp 		return 0;
495998416108SCunyin Chang 	}
496098416108SCunyin Chang 
49618818ace2SAlexey Marchuk 	nsid = status->cpl.cdw0;
49628818ace2SAlexey Marchuk 	free(status);
49633067ecaaSMao Jiang 
49643067ecaaSMao Jiang 	assert(nsid > 0);
49653067ecaaSMao Jiang 
49664957d264SDaniel Verkamp 	/* Return the namespace ID that was created */
49674c4c8ca2SChangpeng Liu 	return nsid;
496898416108SCunyin Chang }
496998416108SCunyin Chang 
497098416108SCunyin Chang int
497198416108SCunyin Chang spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
497298416108SCunyin Chang {
49738818ace2SAlexey Marchuk 	struct nvme_completion_poll_status	*status;
497498416108SCunyin Chang 	int					res;
497598416108SCunyin Chang 
49763067ecaaSMao Jiang 	if (nsid == 0) {
49773067ecaaSMao Jiang 		return -EINVAL;
49783067ecaaSMao Jiang 	}
49793067ecaaSMao Jiang 
498024d61956SAlexey Marchuk 	status = calloc(1, sizeof(*status));
49818818ace2SAlexey Marchuk 	if (!status) {
498201f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
49838818ace2SAlexey Marchuk 		return -ENOMEM;
49848818ace2SAlexey Marchuk 	}
49858818ace2SAlexey Marchuk 
49868818ace2SAlexey Marchuk 	res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, status);
498759970a89SDaniel Verkamp 	if (res) {
49888818ace2SAlexey Marchuk 		free(status);
498998416108SCunyin Chang 		return res;
499059970a89SDaniel Verkamp 	}
49911a9c19a9SSeth Howell 	if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
499201f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_delete_ns failed!\n");
49938818ace2SAlexey Marchuk 		if (!status->timed_out) {
49948818ace2SAlexey Marchuk 			free(status);
49958818ace2SAlexey Marchuk 		}
4996f0b20026SMinfei Huang 		return -ENXIO;
499798416108SCunyin Chang 	}
49988818ace2SAlexey Marchuk 	free(status);
499998416108SCunyin Chang 
5000517b5572SBen Walker 	return nvme_ctrlr_identify_active_ns(ctrlr);
500198416108SCunyin Chang }
5002eae68857SCunyin Chang 
5003eae68857SCunyin Chang int
5004eae68857SCunyin Chang spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
5005eae68857SCunyin Chang 		       struct spdk_nvme_format *format)
5006eae68857SCunyin Chang {
50078818ace2SAlexey Marchuk 	struct nvme_completion_poll_status	*status;
5008eae68857SCunyin Chang 	int					res;
5009eae68857SCunyin Chang 
501024d61956SAlexey Marchuk 	status = calloc(1, sizeof(*status));
50118818ace2SAlexey Marchuk 	if (!status) {
501201f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
50138818ace2SAlexey Marchuk 		return -ENOMEM;
50148818ace2SAlexey Marchuk 	}
50158818ace2SAlexey Marchuk 
5016eae68857SCunyin Chang 	res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb,
5017e69baea1STomasz Kulasek 				    status);
501859970a89SDaniel Verkamp 	if (res) {
50198818ace2SAlexey Marchuk 		free(status);
5020eae68857SCunyin Chang 		return res;
502159970a89SDaniel Verkamp 	}
50221a9c19a9SSeth Howell 	if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
502301f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_format failed!\n");
50248818ace2SAlexey Marchuk 		if (!status->timed_out) {
50258818ace2SAlexey Marchuk 			free(status);
50268818ace2SAlexey Marchuk 		}
5027f0b20026SMinfei Huang 		return -ENXIO;
5028eae68857SCunyin Chang 	}
50298818ace2SAlexey Marchuk 	free(status);
5030eae68857SCunyin Chang 
5031eae68857SCunyin Chang 	return spdk_nvme_ctrlr_reset(ctrlr);
5032eae68857SCunyin Chang }
5033f2168e1dSCunyin Chang 
5034f2168e1dSCunyin Chang int
5035f2168e1dSCunyin Chang spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, uint32_t size,
50366fb1ce42SIsaac Otsiabah 				int slot, enum spdk_nvme_fw_commit_action commit_action, struct spdk_nvme_status *completion_status)
5037f2168e1dSCunyin Chang {
5038f2168e1dSCunyin Chang 	struct spdk_nvme_fw_commit		fw_commit;
50398818ace2SAlexey Marchuk 	struct nvme_completion_poll_status	*status;
5040f2168e1dSCunyin Chang 	int					res;
5041f2168e1dSCunyin Chang 	unsigned int				size_remaining;
5042f2168e1dSCunyin Chang 	unsigned int				offset;
5043f2168e1dSCunyin Chang 	unsigned int				transfer;
5044075d422fSKonrad Sztyber 	uint8_t					*p;
5045f2168e1dSCunyin Chang 
50466fb1ce42SIsaac Otsiabah 	if (!completion_status) {
50476fb1ce42SIsaac Otsiabah 		return -EINVAL;
50486fb1ce42SIsaac Otsiabah 	}
50496fb1ce42SIsaac Otsiabah 	memset(completion_status, 0, sizeof(struct spdk_nvme_status));
5050f2168e1dSCunyin Chang 	if (size % 4) {
505101f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_update_firmware invalid size!\n");
5052f2168e1dSCunyin Chang 		return -1;
5053f2168e1dSCunyin Chang 	}
5054f2168e1dSCunyin Chang 
50556fb1ce42SIsaac Otsiabah 	/* Currently only SPDK_NVME_FW_COMMIT_REPLACE_IMG and
50566fb1ce42SIsaac Otsiabah 	 * SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG are supported.
50576fb1ce42SIsaac Otsiabah 	 */
50586fb1ce42SIsaac Otsiabah 	if ((commit_action != SPDK_NVME_FW_COMMIT_REPLACE_IMG) &&
50596fb1ce42SIsaac Otsiabah 	    (commit_action != SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG)) {
506001f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_update_firmware invalid command!\n");
50616fb1ce42SIsaac Otsiabah 		return -1;
50626fb1ce42SIsaac Otsiabah 	}
50636fb1ce42SIsaac Otsiabah 
506424d61956SAlexey Marchuk 	status = calloc(1, sizeof(*status));
50658818ace2SAlexey Marchuk 	if (!status) {
506601f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
50678818ace2SAlexey Marchuk 		return -ENOMEM;
50688818ace2SAlexey Marchuk 	}
50698818ace2SAlexey Marchuk 
5070f2168e1dSCunyin Chang 	/* Firmware download */
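	/* Transfer the image in chunks of at most the controller's minimum page size. */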
5071f2168e1dSCunyin Chang 	size_remaining = size;
5072f2168e1dSCunyin Chang 	offset = 0;
5073f2168e1dSCunyin Chang 	p = payload;
5074f2168e1dSCunyin Chang 
5075f2168e1dSCunyin Chang 	while (size_remaining > 0) {
507684d90484SDaniel Verkamp 		transfer = spdk_min(size_remaining, ctrlr->min_page_size);
5077f2168e1dSCunyin Chang 
507824d61956SAlexey Marchuk 		memset(status, 0, sizeof(*status));
5079f2168e1dSCunyin Chang 		res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
5080f2168e1dSCunyin Chang 						       nvme_completion_poll_cb,
50818818ace2SAlexey Marchuk 						       status);
508259970a89SDaniel Verkamp 		if (res) {
50838818ace2SAlexey Marchuk 			free(status);
5084f2168e1dSCunyin Chang 			return res;
508559970a89SDaniel Verkamp 		}
5086f2168e1dSCunyin Chang 
50871a9c19a9SSeth Howell 		if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
508801f45ecdSGangCao 			NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_fw_image_download failed!\n");
50898818ace2SAlexey Marchuk 			if (!status->timed_out) {
50908818ace2SAlexey Marchuk 				free(status);
50918818ace2SAlexey Marchuk 			}
5092f0b20026SMinfei Huang 			return -ENXIO;
5093f2168e1dSCunyin Chang 		}
5094f2168e1dSCunyin Chang 		p += transfer;
5095f2168e1dSCunyin Chang 		offset += transfer;
5096f2168e1dSCunyin Chang 		size_remaining -= transfer;
5097f2168e1dSCunyin Chang 	}
5098f2168e1dSCunyin Chang 
5099f2168e1dSCunyin Chang 	/* Firmware commit */
5100f2168e1dSCunyin Chang 	memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
5101f2168e1dSCunyin Chang 	fw_commit.fs = slot;
51026fb1ce42SIsaac Otsiabah 	fw_commit.ca = commit_action;
5103f2168e1dSCunyin Chang 
510424d61956SAlexey Marchuk 	memset(status, 0, sizeof(*status));
5105f2168e1dSCunyin Chang 	res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb,
51068818ace2SAlexey Marchuk 				       status);
510759970a89SDaniel Verkamp 	if (res) {
51088818ace2SAlexey Marchuk 		free(status);
5109f2168e1dSCunyin Chang 		return res;
511059970a89SDaniel Verkamp 	}
5111f2168e1dSCunyin Chang 
51121a9c19a9SSeth Howell 	res = nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock);
5113c4bb0ea6SDaniel Verkamp 
51148818ace2SAlexey Marchuk 	memcpy(completion_status, &status->cpl.status, sizeof(struct spdk_nvme_status));
51158818ace2SAlexey Marchuk 
51168818ace2SAlexey Marchuk 	if (!status->timed_out) {
51178818ace2SAlexey Marchuk 		free(status);
51188818ace2SAlexey Marchuk 	}
5119c4bb0ea6SDaniel Verkamp 
5120c4bb0ea6SDaniel Verkamp 	if (res) {
51218818ace2SAlexey Marchuk 		if (completion_status->sct != SPDK_NVME_SCT_COMMAND_SPECIFIC ||
51228818ace2SAlexey Marchuk 		    completion_status->sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) {
51238818ace2SAlexey Marchuk 			if (completion_status->sct == SPDK_NVME_SCT_COMMAND_SPECIFIC  &&
51248818ace2SAlexey Marchuk 			    completion_status->sc == SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET) {
512501f45ecdSGangCao 				NVME_CTRLR_NOTICELOG(ctrlr,
512601f45ecdSGangCao 						     "firmware activation requires a conventional reset to be performed!\n");
51276fb1ce42SIsaac Otsiabah 			} else {
512801f45ecdSGangCao 				NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");
51296fb1ce42SIsaac Otsiabah 			}
5130f0b20026SMinfei Huang 			return -ENXIO;
5131f2168e1dSCunyin Chang 		}
51326fb1ce42SIsaac Otsiabah 	}
5133f2168e1dSCunyin Chang 
5134f2168e1dSCunyin Chang 	return spdk_nvme_ctrlr_reset(ctrlr);
5135f2168e1dSCunyin Chang }
513638396397SDaniel Verkamp 
51377b28450bSBen Walker int
51387b28450bSBen Walker spdk_nvme_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
51397b28450bSBen Walker {
51407b28450bSBen Walker 	int rc, size;
51417b28450bSBen Walker 	union spdk_nvme_cmbsz_register cmbsz;
51427b28450bSBen Walker 
51437b28450bSBen Walker 	cmbsz = spdk_nvme_ctrlr_get_regs_cmbsz(ctrlr);
51447b28450bSBen Walker 
51457b28450bSBen Walker 	if (cmbsz.bits.rds == 0 || cmbsz.bits.wds == 0) {
51467b28450bSBen Walker 		return -ENOTSUP;
51477b28450bSBen Walker 	}
51487b28450bSBen Walker 
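	/* CMBSZ.SZU selects the size unit (4 KiB << (4 * SZU)); CMBSZ.SZ gives the size in those units. */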
51497b28450bSBen Walker 	size = cmbsz.bits.sz * (0x1000 << (cmbsz.bits.szu * 4));
51507b28450bSBen Walker 
5151e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
51527b28450bSBen Walker 	rc = nvme_transport_ctrlr_reserve_cmb(ctrlr);
5153e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
51547b28450bSBen Walker 
51557b28450bSBen Walker 	if (rc < 0) {
51567b28450bSBen Walker 		return rc;
51577b28450bSBen Walker 	}
51587b28450bSBen Walker 
51597b28450bSBen Walker 	return size;
51607b28450bSBen Walker }
51617b28450bSBen Walker 
516238396397SDaniel Verkamp void *
5163265a8436SBen Walker spdk_nvme_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
516438396397SDaniel Verkamp {
516538396397SDaniel Verkamp 	void *buf;
516638396397SDaniel Verkamp 
5167e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
5168265a8436SBen Walker 	buf = nvme_transport_ctrlr_map_cmb(ctrlr, size);
5169e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
517038396397SDaniel Verkamp 
517138396397SDaniel Verkamp 	return buf;
517238396397SDaniel Verkamp }
517338396397SDaniel Verkamp 
517438396397SDaniel Verkamp void
5175265a8436SBen Walker spdk_nvme_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
517638396397SDaniel Verkamp {
5177e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
5178265a8436SBen Walker 	nvme_transport_ctrlr_unmap_cmb(ctrlr);
5179e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
518038396397SDaniel Verkamp }
5181e865a524SLance Hartmann 
5182f98b792dSKrishna Kanth Reddy int
5183f98b792dSKrishna Kanth Reddy spdk_nvme_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
5184f98b792dSKrishna Kanth Reddy {
5185f98b792dSKrishna Kanth Reddy 	int rc;
5186f98b792dSKrishna Kanth Reddy 
5187e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
5188f98b792dSKrishna Kanth Reddy 	rc = nvme_transport_ctrlr_enable_pmr(ctrlr);
5189e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
5190f98b792dSKrishna Kanth Reddy 
5191f98b792dSKrishna Kanth Reddy 	return rc;
5192f98b792dSKrishna Kanth Reddy }
5193f98b792dSKrishna Kanth Reddy 
5194f98b792dSKrishna Kanth Reddy int
5195f98b792dSKrishna Kanth Reddy spdk_nvme_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
5196f98b792dSKrishna Kanth Reddy {
5197f98b792dSKrishna Kanth Reddy 	int rc;
5198f98b792dSKrishna Kanth Reddy 
5199e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
5200f98b792dSKrishna Kanth Reddy 	rc = nvme_transport_ctrlr_disable_pmr(ctrlr);
5201e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
5202f98b792dSKrishna Kanth Reddy 
5203f98b792dSKrishna Kanth Reddy 	return rc;
5204f98b792dSKrishna Kanth Reddy }
5205f98b792dSKrishna Kanth Reddy 
5206f98b792dSKrishna Kanth Reddy void *
5207f98b792dSKrishna Kanth Reddy spdk_nvme_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
5208f98b792dSKrishna Kanth Reddy {
5209f98b792dSKrishna Kanth Reddy 	void *buf;
5210f98b792dSKrishna Kanth Reddy 
5211e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
5212f98b792dSKrishna Kanth Reddy 	buf = nvme_transport_ctrlr_map_pmr(ctrlr, size);
5213e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
5214f98b792dSKrishna Kanth Reddy 
5215f98b792dSKrishna Kanth Reddy 	return buf;
5216f98b792dSKrishna Kanth Reddy }
5217f98b792dSKrishna Kanth Reddy 
5218f98b792dSKrishna Kanth Reddy int
5219f98b792dSKrishna Kanth Reddy spdk_nvme_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
5220f98b792dSKrishna Kanth Reddy {
5221f98b792dSKrishna Kanth Reddy 	int rc;
5222f98b792dSKrishna Kanth Reddy 
5223e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
5224f98b792dSKrishna Kanth Reddy 	rc = nvme_transport_ctrlr_unmap_pmr(ctrlr);
5225e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
5226f98b792dSKrishna Kanth Reddy 
5227f98b792dSKrishna Kanth Reddy 	return rc;
5228f98b792dSKrishna Kanth Reddy }
5229f98b792dSKrishna Kanth Reddy 
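/*
 * Start a Boot Partition read: point BPMBL at a physically contiguous
 * payload buffer, then program BPRSEL (partition ID, offset, size) to kick
 * off the transfer. Progress is reported through BPINFO.BRS and polled via
 * spdk_nvme_ctrlr_read_boot_partition_poll().
 */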
52308dd1cd21SBen Walker int
52318dd1cd21SBen Walker spdk_nvme_ctrlr_read_boot_partition_start(struct spdk_nvme_ctrlr *ctrlr, void *payload,
5232fec55c84SKrishna Kanth Reddy 		uint32_t bprsz, uint32_t bprof, uint32_t bpid)
5233fec55c84SKrishna Kanth Reddy {
5234fec55c84SKrishna Kanth Reddy 	union spdk_nvme_bprsel_register bprsel;
5235fec55c84SKrishna Kanth Reddy 	union spdk_nvme_bpinfo_register bpinfo;
5236fec55c84SKrishna Kanth Reddy 	uint64_t bpmbl, bpmb_size;
5237fec55c84SKrishna Kanth Reddy 
5238fec55c84SKrishna Kanth Reddy 	if (ctrlr->cap.bits.bps == 0) {
5239fec55c84SKrishna Kanth Reddy 		return -ENOTSUP;
5240fec55c84SKrishna Kanth Reddy 	}
5241fec55c84SKrishna Kanth Reddy 
5242fec55c84SKrishna Kanth Reddy 	if (nvme_ctrlr_get_bpinfo(ctrlr, &bpinfo)) {
5243fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_ERRLOG(ctrlr, "get bpinfo failed\n");
5244fec55c84SKrishna Kanth Reddy 		return -EIO;
5245fec55c84SKrishna Kanth Reddy 	}
5246fec55c84SKrishna Kanth Reddy 
5247fec55c84SKrishna Kanth Reddy 	if (bpinfo.bits.brs == SPDK_NVME_BRS_READ_IN_PROGRESS) {
5248fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition read already initiated\n");
5249fec55c84SKrishna Kanth Reddy 		return -EALREADY;
5250fec55c84SKrishna Kanth Reddy 	}
5251fec55c84SKrishna Kanth Reddy 
5252e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
5253fec55c84SKrishna Kanth Reddy 
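	/* BPRSEL.BPRSZ is expressed in multiples of 4 KiB. */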
5254fec55c84SKrishna Kanth Reddy 	bpmb_size = bprsz * 4096;
5255fec55c84SKrishna Kanth Reddy 	bpmbl = spdk_vtophys(payload, &bpmb_size);
5256fec55c84SKrishna Kanth Reddy 	if (bpmbl == SPDK_VTOPHYS_ERROR) {
5257fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_ERRLOG(ctrlr, "spdk_vtophys of Boot Partition buffer failed\n");
5258e10b4806SJim Harris 		nvme_ctrlr_unlock(ctrlr);
5259fec55c84SKrishna Kanth Reddy 		return -EFAULT;
5260fec55c84SKrishna Kanth Reddy 	}
5261fec55c84SKrishna Kanth Reddy 
5262fec55c84SKrishna Kanth Reddy 	if (bpmb_size != bprsz * 4096) {
5263fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition buffer is not physically contiguous\n");
5264e10b4806SJim Harris 		nvme_ctrlr_unlock(ctrlr);
5265fec55c84SKrishna Kanth Reddy 		return -EFAULT;
5266fec55c84SKrishna Kanth Reddy 	}
5267fec55c84SKrishna Kanth Reddy 
5268fec55c84SKrishna Kanth Reddy 	if (nvme_ctrlr_set_bpmbl(ctrlr, bpmbl)) {
5269fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_ERRLOG(ctrlr, "set_bpmbl() failed\n");
5270e10b4806SJim Harris 		nvme_ctrlr_unlock(ctrlr);
5271fec55c84SKrishna Kanth Reddy 		return -EIO;
5272fec55c84SKrishna Kanth Reddy 	}
5273fec55c84SKrishna Kanth Reddy 
5274fec55c84SKrishna Kanth Reddy 	bprsel.bits.bpid = bpid;
5275fec55c84SKrishna Kanth Reddy 	bprsel.bits.bprof = bprof;
5276fec55c84SKrishna Kanth Reddy 	bprsel.bits.bprsz = bprsz;
5277fec55c84SKrishna Kanth Reddy 
5278fec55c84SKrishna Kanth Reddy 	if (nvme_ctrlr_set_bprsel(ctrlr, &bprsel)) {
5279fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_ERRLOG(ctrlr, "set_bprsel() failed\n");
5280e10b4806SJim Harris 		nvme_ctrlr_unlock(ctrlr);
5281fec55c84SKrishna Kanth Reddy 		return -EIO;
5282fec55c84SKrishna Kanth Reddy 	}
5283fec55c84SKrishna Kanth Reddy 
5284e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
5285fec55c84SKrishna Kanth Reddy 	return 0;
5286fec55c84SKrishna Kanth Reddy }
5287fec55c84SKrishna Kanth Reddy 
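/* Check the status of a previously started Boot Partition read. Returns
 * -EAGAIN while the read is still in progress and 0 once it has completed
 * successfully.
 */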
52888dd1cd21SBen Walker int
52898dd1cd21SBen Walker spdk_nvme_ctrlr_read_boot_partition_poll(struct spdk_nvme_ctrlr *ctrlr)
5290fec55c84SKrishna Kanth Reddy {
5291fec55c84SKrishna Kanth Reddy 	int rc = 0;
5292fec55c84SKrishna Kanth Reddy 	union spdk_nvme_bpinfo_register bpinfo;
5293fec55c84SKrishna Kanth Reddy 
5294fec55c84SKrishna Kanth Reddy 	if (nvme_ctrlr_get_bpinfo(ctrlr, &bpinfo)) {
5295fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_ERRLOG(ctrlr, "get bpinfo failed\n");
5296fec55c84SKrishna Kanth Reddy 		return -EIO;
5297fec55c84SKrishna Kanth Reddy 	}
5298fec55c84SKrishna Kanth Reddy 
5299fec55c84SKrishna Kanth Reddy 	switch (bpinfo.bits.brs) {
5300fec55c84SKrishna Kanth Reddy 	case SPDK_NVME_BRS_NO_READ:
5301fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition read not initiated\n");
5302fec55c84SKrishna Kanth Reddy 		rc = -EINVAL;
5303fec55c84SKrishna Kanth Reddy 		break;
5304fec55c84SKrishna Kanth Reddy 	case SPDK_NVME_BRS_READ_IN_PROGRESS:
5305fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition read in progress\n");
5306fec55c84SKrishna Kanth Reddy 		rc = -EAGAIN;
5307fec55c84SKrishna Kanth Reddy 		break;
5308fec55c84SKrishna Kanth Reddy 	case SPDK_NVME_BRS_READ_ERROR:
5309fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_ERRLOG(ctrlr, "Error completing Boot Partition read\n");
5310fec55c84SKrishna Kanth Reddy 		rc = -EIO;
5311fec55c84SKrishna Kanth Reddy 		break;
5312fec55c84SKrishna Kanth Reddy 	case SPDK_NVME_BRS_READ_SUCCESS:
5313fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_INFOLOG(ctrlr, "Boot Partition read completed successfully\n");
5314fec55c84SKrishna Kanth Reddy 		break;
5315fec55c84SKrishna Kanth Reddy 	default:
5316fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_ERRLOG(ctrlr, "Invalid Boot Partition read status\n");
5317fec55c84SKrishna Kanth Reddy 		rc = -EINVAL;
		break;
5318fec55c84SKrishna Kanth Reddy 	}
5319fec55c84SKrishna Kanth Reddy 
5320fec55c84SKrishna Kanth Reddy 	return rc;
5321fec55c84SKrishna Kanth Reddy }
5322fec55c84SKrishna Kanth Reddy 
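/* Completion callback that drives the Boot Partition write state machine:
 * the image is downloaded in min_page_size chunks (DOWNLOADING), then
 * committed with Replace (REPLACE) and Activate (ACTIVATE) Firmware Commit
 * actions, and finally the user's callback is invoked.
 */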
5323fec55c84SKrishna Kanth Reddy static void
5324fec55c84SKrishna Kanth Reddy nvme_write_boot_partition_cb(void *arg, const struct spdk_nvme_cpl *cpl)
5325fec55c84SKrishna Kanth Reddy {
5326fec55c84SKrishna Kanth Reddy 	int res;
5327fec55c84SKrishna Kanth Reddy 	struct spdk_nvme_ctrlr *ctrlr = arg;
5328fec55c84SKrishna Kanth Reddy 	struct spdk_nvme_fw_commit fw_commit;
5329fec55c84SKrishna Kanth Reddy 	struct spdk_nvme_cpl err_cpl =
5330fec55c84SKrishna Kanth Reddy 	{.status = {.sct = SPDK_NVME_SCT_GENERIC, .sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR }};
5331fec55c84SKrishna Kanth Reddy 
5332fec55c84SKrishna Kanth Reddy 	if (spdk_nvme_cpl_is_error(cpl)) {
5333fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_ERRLOG(ctrlr, "Write Boot Partition failed\n");
5334fec55c84SKrishna Kanth Reddy 		ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, cpl);
5335fec55c84SKrishna Kanth Reddy 		return;
5336fec55c84SKrishna Kanth Reddy 	}
5337fec55c84SKrishna Kanth Reddy 
	/* If the chunk that just completed was the final one, move straight to
	 * the commit phase. Without this, an image whose size is an exact
	 * multiple of min_page_size would fall through to a zero-length
	 * Firmware Image Download below.
	 */
	if (ctrlr->bp_ws == SPDK_NVME_BP_WS_DOWNLOADING &&
	    ctrlr->fw_size_remaining == ctrlr->fw_transfer_size) {
		ctrlr->bp_ws = SPDK_NVME_BP_WS_DOWNLOADED;
	}

5338fec55c84SKrishna Kanth Reddy 	if (ctrlr->bp_ws == SPDK_NVME_BP_WS_DOWNLOADING) {
5339fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Downloading at Offset %u Success\n", ctrlr->fw_offset);
5340075d422fSKonrad Sztyber 		ctrlr->fw_payload = (uint8_t *)ctrlr->fw_payload + ctrlr->fw_transfer_size;
5341fec55c84SKrishna Kanth Reddy 		ctrlr->fw_offset += ctrlr->fw_transfer_size;
5342fec55c84SKrishna Kanth Reddy 		ctrlr->fw_size_remaining -= ctrlr->fw_transfer_size;
5343fec55c84SKrishna Kanth Reddy 		ctrlr->fw_transfer_size = spdk_min(ctrlr->fw_size_remaining, ctrlr->min_page_size);
5344fec55c84SKrishna Kanth Reddy 		res = nvme_ctrlr_cmd_fw_image_download(ctrlr, ctrlr->fw_transfer_size, ctrlr->fw_offset,
5345fec55c84SKrishna Kanth Reddy 						       ctrlr->fw_payload, nvme_write_boot_partition_cb, ctrlr);
5346fec55c84SKrishna Kanth Reddy 		if (res) {
5347fec55c84SKrishna Kanth Reddy 			NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_image_download failed!\n");
5348fec55c84SKrishna Kanth Reddy 			ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
5349fec55c84SKrishna Kanth Reddy 			return;
5350fec55c84SKrishna Kanth Reddy 		}
5355fec55c84SKrishna Kanth Reddy 	} else if (ctrlr->bp_ws == SPDK_NVME_BP_WS_DOWNLOADED) {
5356fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Download Success\n");
5357fec55c84SKrishna Kanth Reddy 		memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
5358fec55c84SKrishna Kanth Reddy 		fw_commit.bpid = ctrlr->bpid;
5359fec55c84SKrishna Kanth Reddy 		fw_commit.ca = SPDK_NVME_FW_COMMIT_REPLACE_BOOT_PARTITION;
5360fec55c84SKrishna Kanth Reddy 		res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit,
5361fec55c84SKrishna Kanth Reddy 					       nvme_write_boot_partition_cb, ctrlr);
5362fec55c84SKrishna Kanth Reddy 		if (res) {
5363fec55c84SKrishna Kanth Reddy 			NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");
5364fec55c84SKrishna Kanth Reddy 			NVME_CTRLR_ERRLOG(ctrlr, "commit action: %d\n", fw_commit.ca);
5365fec55c84SKrishna Kanth Reddy 			ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
5366fec55c84SKrishna Kanth Reddy 			return;
5367fec55c84SKrishna Kanth Reddy 		}
5368fec55c84SKrishna Kanth Reddy 
5369fec55c84SKrishna Kanth Reddy 		ctrlr->bp_ws = SPDK_NVME_BP_WS_REPLACE;
5370fec55c84SKrishna Kanth Reddy 	} else if (ctrlr->bp_ws == SPDK_NVME_BP_WS_REPLACE) {
5371fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Replacement Success\n");
5372fec55c84SKrishna Kanth Reddy 		memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
5373fec55c84SKrishna Kanth Reddy 		fw_commit.bpid = ctrlr->bpid;
5374fec55c84SKrishna Kanth Reddy 		fw_commit.ca = SPDK_NVME_FW_COMMIT_ACTIVATE_BOOT_PARTITION;
5375fec55c84SKrishna Kanth Reddy 		res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit,
5376fec55c84SKrishna Kanth Reddy 					       nvme_write_boot_partition_cb, ctrlr);
5377fec55c84SKrishna Kanth Reddy 		if (res) {
5378fec55c84SKrishna Kanth Reddy 			NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");
5379fec55c84SKrishna Kanth Reddy 			NVME_CTRLR_ERRLOG(ctrlr, "commit action: %d\n", fw_commit.ca);
5380fec55c84SKrishna Kanth Reddy 			ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
5381fec55c84SKrishna Kanth Reddy 			return;
5382fec55c84SKrishna Kanth Reddy 		}
5383fec55c84SKrishna Kanth Reddy 
5384fec55c84SKrishna Kanth Reddy 		ctrlr->bp_ws = SPDK_NVME_BP_WS_ACTIVATE;
5385fec55c84SKrishna Kanth Reddy 	} else if (ctrlr->bp_ws == SPDK_NVME_BP_WS_ACTIVATE) {
5386fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Activation Success\n");
5387fec55c84SKrishna Kanth Reddy 		ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, cpl);
5388fec55c84SKrishna Kanth Reddy 	} else {
5389fec55c84SKrishna Kanth Reddy 		NVME_CTRLR_ERRLOG(ctrlr, "Invalid Boot Partition write state\n");
5390fec55c84SKrishna Kanth Reddy 		ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
5391fec55c84SKrishna Kanth Reddy 		return;
5392fec55c84SKrishna Kanth Reddy 	}
5393fec55c84SKrishna Kanth Reddy }
5394fec55c84SKrishna Kanth Reddy 
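/* Download a Boot Partition image of the given size and commit it to
 * partition bpid. Progress is driven by nvme_write_boot_partition_cb();
 * cb_fn is invoked once the new image has been activated, or on the first
 * failure.
 */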
53958dd1cd21SBen Walker int
53968dd1cd21SBen Walker spdk_nvme_ctrlr_write_boot_partition(struct spdk_nvme_ctrlr *ctrlr,
5397fec55c84SKrishna Kanth Reddy 				     void *payload, uint32_t size, uint32_t bpid,
5398fec55c84SKrishna Kanth Reddy 				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
5399fec55c84SKrishna Kanth Reddy {
5400fec55c84SKrishna Kanth Reddy 	int res;
5401fec55c84SKrishna Kanth Reddy 
5402fec55c84SKrishna Kanth Reddy 	if (ctrlr->cap.bits.bps == 0) {
5403fec55c84SKrishna Kanth Reddy 		return -ENOTSUP;
5404fec55c84SKrishna Kanth Reddy 	}
5405fec55c84SKrishna Kanth Reddy 
5406fec55c84SKrishna Kanth Reddy 	ctrlr->bp_ws = SPDK_NVME_BP_WS_DOWNLOADING;
5407fec55c84SKrishna Kanth Reddy 	ctrlr->bpid = bpid;
5408fec55c84SKrishna Kanth Reddy 	ctrlr->bp_write_cb_fn = cb_fn;
5409fec55c84SKrishna Kanth Reddy 	ctrlr->bp_write_cb_arg = cb_arg;
5410fec55c84SKrishna Kanth Reddy 	ctrlr->fw_offset = 0;
5411fec55c84SKrishna Kanth Reddy 	ctrlr->fw_size_remaining = size;
5412fec55c84SKrishna Kanth Reddy 	ctrlr->fw_payload = payload;
5413fec55c84SKrishna Kanth Reddy 	ctrlr->fw_transfer_size = spdk_min(ctrlr->fw_size_remaining, ctrlr->min_page_size);
5414fec55c84SKrishna Kanth Reddy 
5415fec55c84SKrishna Kanth Reddy 	res = nvme_ctrlr_cmd_fw_image_download(ctrlr, ctrlr->fw_transfer_size, ctrlr->fw_offset,
5416fec55c84SKrishna Kanth Reddy 					       ctrlr->fw_payload, nvme_write_boot_partition_cb, ctrlr);
5417fec55c84SKrishna Kanth Reddy 
5418fec55c84SKrishna Kanth Reddy 	return res;
5419fec55c84SKrishna Kanth Reddy }
5420fec55c84SKrishna Kanth Reddy 
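/* True if the controller's subsystem NQN begins with the well-known
 * discovery service NQN.
 */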
5421e865a524SLance Hartmann bool
5422e865a524SLance Hartmann spdk_nvme_ctrlr_is_discovery(struct spdk_nvme_ctrlr *ctrlr)
5423e865a524SLance Hartmann {
5424e865a524SLance Hartmann 	assert(ctrlr);
5425e865a524SLance Hartmann 
5426e865a524SLance Hartmann 	return !strncmp(ctrlr->trid.subnqn, SPDK_NVMF_DISCOVERY_NQN,
5427e865a524SLance Hartmann 			strlen(SPDK_NVMF_DISCOVERY_NQN));
5428e865a524SLance Hartmann }
542919feb4e1SChunyang Hui 
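/* True if the controller is reached over an NVMe over Fabrics transport. */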
54309f5e3c99SJim Harris bool
54319f5e3c99SJim Harris spdk_nvme_ctrlr_is_fabrics(struct spdk_nvme_ctrlr *ctrlr)
54329f5e3c99SJim Harris {
54339f5e3c99SJim Harris 	assert(ctrlr);
54349f5e3c99SJim Harris 
543546b355c0SChangpeng Liu 	return spdk_nvme_trtype_is_fabrics(ctrlr->trid.trtype);
54369f5e3c99SJim Harris }
54379f5e3c99SJim Harris 
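/* Blocking wrapper around spdk_nvme_ctrlr_cmd_security_receive(): submits a
 * Security Receive command on the admin queue and waits for its completion.
 */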
543819feb4e1SChunyang Hui int
543919feb4e1SChunyang Hui spdk_nvme_ctrlr_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
544019feb4e1SChunyang Hui 				 uint16_t spsp, uint8_t nssf, void *payload, size_t size)
544119feb4e1SChunyang Hui {
54428818ace2SAlexey Marchuk 	struct nvme_completion_poll_status	*status;
544319feb4e1SChunyang Hui 	int					res;
544419feb4e1SChunyang Hui 
544524d61956SAlexey Marchuk 	status = calloc(1, sizeof(*status));
54468818ace2SAlexey Marchuk 	if (!status) {
544701f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
54488818ace2SAlexey Marchuk 		return -ENOMEM;
54498818ace2SAlexey Marchuk 	}
54508818ace2SAlexey Marchuk 
54514f7fa18dSChangpeng Liu 	res = spdk_nvme_ctrlr_cmd_security_receive(ctrlr, secp, spsp, nssf, payload, size,
54528818ace2SAlexey Marchuk 			nvme_completion_poll_cb, status);
545319feb4e1SChunyang Hui 	if (res) {
54548818ace2SAlexey Marchuk 		free(status);
545519feb4e1SChunyang Hui 		return res;
545619feb4e1SChunyang Hui 	}
54571a9c19a9SSeth Howell 	if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
545801f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_cmd_security_receive failed!\n");
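		/* On timeout, ownership of status passes to
		 * nvme_completion_poll_cb(), which frees it when the command
		 * finally completes.
		 */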
54598818ace2SAlexey Marchuk 		if (!status->timed_out) {
54608818ace2SAlexey Marchuk 			free(status);
54618818ace2SAlexey Marchuk 		}
546219feb4e1SChunyang Hui 		return -ENXIO;
546319feb4e1SChunyang Hui 	}
54648818ace2SAlexey Marchuk 	free(status);
546519feb4e1SChunyang Hui 
546619feb4e1SChunyang Hui 	return 0;
546719feb4e1SChunyang Hui }
546819feb4e1SChunyang Hui 
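/* Blocking wrapper around spdk_nvme_ctrlr_cmd_security_send(): submits a
 * Security Send command on the admin queue and waits for its completion.
 */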
546919feb4e1SChunyang Hui int
547019feb4e1SChunyang Hui spdk_nvme_ctrlr_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
547119feb4e1SChunyang Hui 			      uint16_t spsp, uint8_t nssf, void *payload, size_t size)
547219feb4e1SChunyang Hui {
54738818ace2SAlexey Marchuk 	struct nvme_completion_poll_status	*status;
547419feb4e1SChunyang Hui 	int					res;
547519feb4e1SChunyang Hui 
547624d61956SAlexey Marchuk 	status = calloc(1, sizeof(*status));
54778818ace2SAlexey Marchuk 	if (!status) {
547801f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
54798818ace2SAlexey Marchuk 		return -ENOMEM;
54808818ace2SAlexey Marchuk 	}
54818818ace2SAlexey Marchuk 
54824f7fa18dSChangpeng Liu 	res = spdk_nvme_ctrlr_cmd_security_send(ctrlr, secp, spsp, nssf, payload, size,
54834f7fa18dSChangpeng Liu 						nvme_completion_poll_cb,
54848818ace2SAlexey Marchuk 						status);
548519feb4e1SChunyang Hui 	if (res) {
54868818ace2SAlexey Marchuk 		free(status);
548719feb4e1SChunyang Hui 		return res;
548819feb4e1SChunyang Hui 	}
54891a9c19a9SSeth Howell 	if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
549001f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_cmd_security_send failed!\n");
54918818ace2SAlexey Marchuk 		if (!status->timed_out) {
54928818ace2SAlexey Marchuk 			free(status);
54938818ace2SAlexey Marchuk 		}
549419feb4e1SChunyang Hui 		return -ENXIO;
549519feb4e1SChunyang Hui 	}
549619feb4e1SChunyang Hui 
54978818ace2SAlexey Marchuk 	free(status);
54988818ace2SAlexey Marchuk 
549919feb4e1SChunyang Hui 	return 0;
550019feb4e1SChunyang Hui }
550151ab3788SChunyang Hui 
550251ab3788SChunyang Hui uint64_t
550351ab3788SChunyang Hui spdk_nvme_ctrlr_get_flags(struct spdk_nvme_ctrlr *ctrlr)
550451ab3788SChunyang Hui {
550551ab3788SChunyang Hui 	return ctrlr->flags;
550651ab3788SChunyang Hui }
5507f0dd2b78SJim Harris 
5508f0dd2b78SJim Harris const struct spdk_nvme_transport_id *
5509f0dd2b78SJim Harris spdk_nvme_ctrlr_get_transport_id(struct spdk_nvme_ctrlr *ctrlr)
5510f0dd2b78SJim Harris {
5511f0dd2b78SJim Harris 	return &ctrlr->trid;
5512f0dd2b78SJim Harris }
551349e70693SBen Walker 
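/* Reserve the lowest free I/O queue ID under the controller lock. Returns
 * the ID on success, or -1 if all configured queue IDs are in use.
 */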
55144e06de69SJacek Kalwas int32_t
55154e06de69SJacek Kalwas spdk_nvme_ctrlr_alloc_qid(struct spdk_nvme_ctrlr *ctrlr)
55164e06de69SJacek Kalwas {
55174e06de69SJacek Kalwas 	uint32_t qid;
55184e06de69SJacek Kalwas 
55193c4a68caSAlexey Marchuk 	assert(ctrlr->free_io_qids);
5520e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
55214e06de69SJacek Kalwas 	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
55224e06de69SJacek Kalwas 	if (qid > ctrlr->opts.num_io_queues) {
552301f45ecdSGangCao 		NVME_CTRLR_ERRLOG(ctrlr, "No free I/O queue IDs\n");
5524e10b4806SJim Harris 		nvme_ctrlr_unlock(ctrlr);
55254e06de69SJacek Kalwas 		return -1;
55264e06de69SJacek Kalwas 	}
55274e06de69SJacek Kalwas 
55284e06de69SJacek Kalwas 	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
5529e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
55304e06de69SJacek Kalwas 	return qid;
55314e06de69SJacek Kalwas }
55324e06de69SJacek Kalwas 
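/* Return qid, previously reserved with spdk_nvme_ctrlr_alloc_qid(), to the
 * pool of free I/O queue IDs; a no-op if the free-ID bit array no longer
 * exists.
 */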
55334e06de69SJacek Kalwas void
55344e06de69SJacek Kalwas spdk_nvme_ctrlr_free_qid(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid)
55354e06de69SJacek Kalwas {
55364e06de69SJacek Kalwas 	assert(qid <= ctrlr->opts.num_io_queues);
55374e06de69SJacek Kalwas 
5538e10b4806SJim Harris 	nvme_ctrlr_lock(ctrlr);
553903323b09SZiye Yang 
554003323b09SZiye Yang 	if (spdk_likely(ctrlr->free_io_qids)) {
55414e06de69SJacek Kalwas 		spdk_bit_array_set(ctrlr->free_io_qids, qid);
554203323b09SZiye Yang 	}
554303323b09SZiye Yang 
5544e10b4806SJim Harris 	nvme_ctrlr_unlock(ctrlr);
55454e06de69SJacek Kalwas }
5546a422d8b0SAlexey Marchuk 
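/* Fill up to array_size entries of domains with the memory domains used by
 * the transport and return the total number of domains in use.
 */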
55479381d8d3SAlexey Marchuk int
55489381d8d3SAlexey Marchuk spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
55499381d8d3SAlexey Marchuk 				   struct spdk_memory_domain **domains, int array_size)
5550a422d8b0SAlexey Marchuk {
55519381d8d3SAlexey Marchuk 	return nvme_transport_ctrlr_get_memory_domains(ctrlr, domains, array_size);
5552a422d8b0SAlexey Marchuk }
55534ddd77b2SKonrad Sztyber 
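/* Run in-band authentication on the controller's admin queue pair. */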
55544ddd77b2SKonrad Sztyber int
55554ddd77b2SKonrad Sztyber spdk_nvme_ctrlr_authenticate(struct spdk_nvme_ctrlr *ctrlr,
55564ddd77b2SKonrad Sztyber 			     spdk_nvme_authenticate_cb cb_fn, void *cb_ctx)
55574ddd77b2SKonrad Sztyber {
55584ddd77b2SKonrad Sztyber 	return spdk_nvme_qpair_authenticate(ctrlr->adminq, cb_fn, cb_ctx);
55594ddd77b2SKonrad Sztyber }
5560