xref: /spdk/lib/nvmf/ctrlr.c (revision c2471e450077f9601e9f40f4449b1ee639f00498)
1488570ebSJim Harris /*   SPDX-License-Identifier: BSD-3-Clause
2a6dbe372Spaul luse  *   Copyright (C) 2017 Intel Corporation. All rights reserved.
31c5444d6SAlexey Marchuk  *   Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved.
4d478b20dSAlexey Marchuk  *   Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
56ee9cd25Skyuho.son  *   Copyright (c) 2024 Samsung Electronics Co., Ltd. All rights reserved.
603788f93SBen Walker  */
703788f93SBen Walker 
803788f93SBen Walker #include "spdk/stdinc.h"
903788f93SBen Walker 
1003788f93SBen Walker #include "nvmf_internal.h"
1103788f93SBen Walker #include "transport.h"
1203788f93SBen Walker 
13a36785dfSDennis Maisenbacher #include "spdk/bdev.h"
14a36785dfSDennis Maisenbacher #include "spdk/bdev_zone.h"
15f08cea71SBen Walker #include "spdk/bit_array.h"
16a7b62cc4SDaniel Verkamp #include "spdk/endian.h"
17a83f91c2SBen Walker #include "spdk/thread.h"
1803788f93SBen Walker #include "spdk/nvme_spec.h"
19ab945f32SBen Walker #include "spdk/nvmf_cmd.h"
202d1a2926SDaniel Verkamp #include "spdk/string.h"
2103788f93SBen Walker #include "spdk/util.h"
2212ab1e26SDaniel Verkamp #include "spdk/version.h"
234e8e97c8STomasz Zawadzki #include "spdk/log.h"
24a266b6e4SKonrad Sztyber #include "spdk_internal/usdt.h"
2503788f93SBen Walker 
26b8769cdbSJinYu #define MIN_KEEP_ALIVE_TIMEOUT_IN_MS 10000
27b8769cdbSJinYu #define NVMF_DISC_KATO_IN_MS 120000
28b8769cdbSJinYu #define KAS_TIME_UNIT_IN_MS 100
29faacc878SChangpeng Liu #define KAS_DEFAULT_VALUE (MIN_KEEP_ALIVE_TIMEOUT_IN_MS / KAS_TIME_UNIT_IN_MS)
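/* With the values above, KAS_DEFAULT_VALUE evaluates to 100, so the controller advertises a
 * Keep Alive granularity of 100 * 100 ms = 10 s, matching the 10 second minimum keep alive timeout.
 */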
3003788f93SBen Walker 
31f63c0899SChangpeng Liu #define NVMF_CC_RESET_SHN_TIMEOUT_IN_MS	10000
32f63c0899SChangpeng Liu 
33f63c0899SChangpeng Liu #define NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS	(NVMF_CC_RESET_SHN_TIMEOUT_IN_MS + 5000)
34f63c0899SChangpeng Liu 
3570c6f5d9SJim Harris #define DUPLICATE_QID_RETRY_US 1000
36b1a23196SJim Harris 
3712ab1e26SDaniel Verkamp /*
3812ab1e26SDaniel Verkamp  * Report the SPDK version as the firmware revision.
3912ab1e26SDaniel Verkamp  * SPDK_VERSION_STRING won't fit into FR (only 8 bytes), so try to fit the most important parts.
4012ab1e26SDaniel Verkamp  */
4112ab1e26SDaniel Verkamp #define FW_VERSION SPDK_VERSION_MAJOR_STRING SPDK_VERSION_MINOR_STRING SPDK_VERSION_PATCH_STRING
422d1a2926SDaniel Verkamp 
436206e468SShuhei Matsumoto #define ANA_TRANSITION_TIME_IN_SEC 10
446206e468SShuhei Matsumoto 
452a99dbc7SShuhei Matsumoto #define NVMF_ABORT_COMMAND_LIMIT 3
462a99dbc7SShuhei Matsumoto 
473fa22056SMichael Haeuptle /*
483fa22056SMichael Haeuptle  * Support for custom admin command handlers
493fa22056SMichael Haeuptle  */
503fa22056SMichael Haeuptle struct spdk_nvmf_custom_admin_cmd {
513fa22056SMichael Haeuptle 	spdk_nvmf_custom_cmd_hdlr hdlr;
523fa22056SMichael Haeuptle 	uint32_t nsid; /* nsid to forward */
533fa22056SMichael Haeuptle };
543fa22056SMichael Haeuptle 
553fa22056SMichael Haeuptle static struct spdk_nvmf_custom_admin_cmd g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_MAX_OPC + 1];
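/* The table above is indexed by admin opcode; e.g. a handler registered for Identify (opcode 0x06)
 * would live in g_nvmf_custom_admin_cmd_hdlrs[0x06].
 */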
563fa22056SMichael Haeuptle 
57db96437eSShuhei Matsumoto static void _nvmf_request_complete(void *ctx);
589910e8d7SJim Harris int nvmf_passthru_admin_cmd_for_ctrlr(struct spdk_nvmf_request *req, struct spdk_nvmf_ctrlr *ctrlr);
5960241941SKarl Bonde Torp static int nvmf_passthru_admin_cmd(struct spdk_nvmf_request *req);
60db96437eSShuhei Matsumoto 
616d4c78eaSZiye Yang static inline void
62198fd2ceSSeth Howell nvmf_invalid_connect_response(struct spdk_nvmf_fabric_connect_rsp *rsp,
636d4c78eaSZiye Yang 			      uint8_t iattr, uint16_t ipo)
646d4c78eaSZiye Yang {
656d4c78eaSZiye Yang 	rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
666d4c78eaSZiye Yang 	rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
676d4c78eaSZiye Yang 	rsp->status_code_specific.invalid.iattr = iattr;
686d4c78eaSZiye Yang 	rsp->status_code_specific.invalid.ipo = ipo;
696d4c78eaSZiye Yang }
706d4c78eaSZiye Yang 
716d4c78eaSZiye Yang #define SPDK_NVMF_INVALID_CONNECT_CMD(rsp, field)	\
72198fd2ceSSeth Howell 	nvmf_invalid_connect_response(rsp, 0, offsetof(struct spdk_nvmf_fabric_connect_cmd, field))
736d4c78eaSZiye Yang #define SPDK_NVMF_INVALID_CONNECT_DATA(rsp, field)	\
74198fd2ceSSeth Howell 	nvmf_invalid_connect_response(rsp, 1, offsetof(struct spdk_nvmf_fabric_connect_data, field))
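/* IATTR selects what IPO points into: 0 means an offset into the Connect command SQE, 1 means an
 * offset into the Connect data block, which is why the two macros above differ only in that argument.
 */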
756d4c78eaSZiye Yang 
76b832f99fSyupeng 
776d4c78eaSZiye Yang static void
78198fd2ceSSeth Howell nvmf_ctrlr_stop_keep_alive_timer(struct spdk_nvmf_ctrlr *ctrlr)
79b8769cdbSJinYu {
80b8769cdbSJinYu 	if (!ctrlr) {
81b8769cdbSJinYu 		SPDK_ERRLOG("Controller is NULL\n");
82b8769cdbSJinYu 		return;
83b8769cdbSJinYu 	}
84b8769cdbSJinYu 
85b8769cdbSJinYu 	if (ctrlr->keep_alive_poller == NULL) {
86b8769cdbSJinYu 		return;
87b8769cdbSJinYu 	}
88b8769cdbSJinYu 
892172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Stop keep alive poller\n");
90b8769cdbSJinYu 	spdk_poller_unregister(&ctrlr->keep_alive_poller);
91b8769cdbSJinYu }
92b8769cdbSJinYu 
93b8769cdbSJinYu static void
9471cd42e1SJacek Kalwas nvmf_ctrlr_stop_association_timer(struct spdk_nvmf_ctrlr *ctrlr)
9571cd42e1SJacek Kalwas {
9671cd42e1SJacek Kalwas 	if (!ctrlr) {
9771cd42e1SJacek Kalwas 		SPDK_ERRLOG("Controller is NULL\n");
9871cd42e1SJacek Kalwas 		assert(false);
9971cd42e1SJacek Kalwas 		return;
10071cd42e1SJacek Kalwas 	}
10171cd42e1SJacek Kalwas 
10271cd42e1SJacek Kalwas 	if (ctrlr->association_timer == NULL) {
10371cd42e1SJacek Kalwas 		return;
10471cd42e1SJacek Kalwas 	}
10571cd42e1SJacek Kalwas 
1062172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Stop association timer\n");
10771cd42e1SJacek Kalwas 	spdk_poller_unregister(&ctrlr->association_timer);
10871cd42e1SJacek Kalwas }
10971cd42e1SJacek Kalwas 
11071cd42e1SJacek Kalwas static void
111198fd2ceSSeth Howell nvmf_ctrlr_disconnect_qpairs_done(struct spdk_io_channel_iter *i, int status)
112b8769cdbSJinYu {
113b8769cdbSJinYu 	if (status == 0) {
1142172c432STomasz Zawadzki 		SPDK_DEBUGLOG(nvmf, "ctrlr disconnect qpairs completed successfully\n");
115b8769cdbSJinYu 	} else {
116b8769cdbSJinYu 		SPDK_ERRLOG("Failed to disconnect ctrlr qpairs\n");
117b8769cdbSJinYu 	}
118b8769cdbSJinYu }
119b8769cdbSJinYu 
120ae86baebSSeth Howell static int
121198fd2ceSSeth Howell _nvmf_ctrlr_disconnect_qpairs_on_pg(struct spdk_io_channel_iter *i, bool include_admin)
122b8769cdbSJinYu {
123b8769cdbSJinYu 	int rc = 0;
124b8769cdbSJinYu 	struct spdk_nvmf_ctrlr *ctrlr;
125b8769cdbSJinYu 	struct spdk_nvmf_qpair *qpair, *temp_qpair;
126b8769cdbSJinYu 	struct spdk_io_channel *ch;
127b8769cdbSJinYu 	struct spdk_nvmf_poll_group *group;
128b8769cdbSJinYu 
129b8769cdbSJinYu 	ctrlr = spdk_io_channel_iter_get_ctx(i);
130b8769cdbSJinYu 	ch = spdk_io_channel_iter_get_channel(i);
131b8769cdbSJinYu 	group = spdk_io_channel_get_ctx(ch);
132b8769cdbSJinYu 
133b8769cdbSJinYu 	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, temp_qpair) {
1349cb21ad6SSeth Howell 		if (qpair->ctrlr == ctrlr && (include_admin || !nvmf_qpair_is_admin_queue(qpair))) {
135608b54a2SKonrad Sztyber 			rc = spdk_nvmf_qpair_disconnect(qpair);
136b8769cdbSJinYu 			if (rc) {
137d478b20dSAlexey Marchuk 				if (rc == -EINPROGRESS) {
138d478b20dSAlexey Marchuk 					rc = 0;
139d478b20dSAlexey Marchuk 				} else {
140b8769cdbSJinYu 					SPDK_ERRLOG("Qpair disconnect failed\n");
141ae86baebSSeth Howell 					return rc;
142b8769cdbSJinYu 				}
143b8769cdbSJinYu 			}
144b8769cdbSJinYu 		}
145d478b20dSAlexey Marchuk 	}
146b8769cdbSJinYu 
147ae86baebSSeth Howell 	return rc;
148ae86baebSSeth Howell }
149ae86baebSSeth Howell 
150ae86baebSSeth Howell static void
151198fd2ceSSeth Howell nvmf_ctrlr_disconnect_qpairs_on_pg(struct spdk_io_channel_iter *i)
152ae86baebSSeth Howell {
153198fd2ceSSeth Howell 	spdk_for_each_channel_continue(i, _nvmf_ctrlr_disconnect_qpairs_on_pg(i, true));
154ae86baebSSeth Howell }
155ae86baebSSeth Howell 
156ae86baebSSeth Howell static void
157198fd2ceSSeth Howell nvmf_ctrlr_disconnect_io_qpairs_on_pg(struct spdk_io_channel_iter *i)
158ae86baebSSeth Howell {
159198fd2ceSSeth Howell 	spdk_for_each_channel_continue(i, _nvmf_ctrlr_disconnect_qpairs_on_pg(i, false));
160b8769cdbSJinYu }
161b8769cdbSJinYu 
162b8769cdbSJinYu static int
163198fd2ceSSeth Howell nvmf_ctrlr_keep_alive_poll(void *ctx)
164b8769cdbSJinYu {
165b8769cdbSJinYu 	uint64_t keep_alive_timeout_tick;
166b8769cdbSJinYu 	uint64_t now = spdk_get_ticks();
167b8769cdbSJinYu 	struct spdk_nvmf_ctrlr *ctrlr = ctx;
168b8769cdbSJinYu 
16997385af1SAlexey Marchuk 	if (ctrlr->in_destruct) {
17097385af1SAlexey Marchuk 		nvmf_ctrlr_stop_keep_alive_timer(ctrlr);
17197385af1SAlexey Marchuk 		return SPDK_POLLER_IDLE;
17297385af1SAlexey Marchuk 	}
17397385af1SAlexey Marchuk 
1742172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Polling ctrlr keep alive timeout\n");
175b8769cdbSJinYu 
176b8769cdbSJinYu 	/* If the Keep Alive feature is in use, check whether the timer has expired */
177b8769cdbSJinYu 	keep_alive_timeout_tick = ctrlr->last_keep_alive_tick +
178b8769cdbSJinYu 				  ctrlr->feat.keep_alive_timer.bits.kato * spdk_get_ticks_hz() / UINT64_C(1000);
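	/* kato is in milliseconds, so the expression above converts it to ticks; e.g. with
	 * kato = 10000 ms and a 1 GHz tick rate the deadline is last_keep_alive_tick + 10^10 ticks.
	 */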
179b8769cdbSJinYu 	if (now > keep_alive_timeout_tick) {
180813869d8SAlexey Marchuk 		SPDK_NOTICELOG("Disconnecting host %s from subsystem %s due to keep alive timeout.\n",
181813869d8SAlexey Marchuk 			       ctrlr->hostnqn, ctrlr->subsys->subnqn);
182b8769cdbSJinYu 		/* set the Controller Fatal Status bit to '1' */
183b8769cdbSJinYu 		if (ctrlr->vcprop.csts.bits.cfs == 0) {
184a19e5b4dSChangpeng Liu 			nvmf_ctrlr_set_fatal_status(ctrlr);
185b8769cdbSJinYu 
186b8769cdbSJinYu 			/*
187b8769cdbSJinYu 			 * Disconnect all qpairs with qpair->ctrlr == ctrlr, which terminates the
188b8769cdbSJinYu 			 * transport connections, breaks the host to controller association and
189b8769cdbSJinYu 			 * ultimately destroys the ctrlr.
190b8769cdbSJinYu 			 */
191b8769cdbSJinYu 			spdk_for_each_channel(ctrlr->subsys->tgt,
192198fd2ceSSeth Howell 					      nvmf_ctrlr_disconnect_qpairs_on_pg,
193b8769cdbSJinYu 					      ctrlr,
194198fd2ceSSeth Howell 					      nvmf_ctrlr_disconnect_qpairs_done);
1956ec5e75aSJohn Levon 			return SPDK_POLLER_BUSY;
196b8769cdbSJinYu 		}
197b8769cdbSJinYu 	}
198b8769cdbSJinYu 
1996ec5e75aSJohn Levon 	return SPDK_POLLER_IDLE;
200b8769cdbSJinYu }
201b8769cdbSJinYu 
202b8769cdbSJinYu static void
203198fd2ceSSeth Howell nvmf_ctrlr_start_keep_alive_timer(struct spdk_nvmf_ctrlr *ctrlr)
204b8769cdbSJinYu {
205b8769cdbSJinYu 	if (!ctrlr) {
206b8769cdbSJinYu 		SPDK_ERRLOG("Controller is NULL\n");
207b8769cdbSJinYu 		return;
208b8769cdbSJinYu 	}
209b8769cdbSJinYu 
210b8769cdbSJinYu 	/* if cleared to 0 then the Keep Alive Timer is disabled */
211b8769cdbSJinYu 	if (ctrlr->feat.keep_alive_timer.bits.kato != 0) {
212b8769cdbSJinYu 
213b8769cdbSJinYu 		ctrlr->last_keep_alive_tick = spdk_get_ticks();
214b8769cdbSJinYu 
2152172c432STomasz Zawadzki 		SPDK_DEBUGLOG(nvmf, "Ctrlr add keep alive poller\n");
216198fd2ceSSeth Howell 		ctrlr->keep_alive_poller = SPDK_POLLER_REGISTER(nvmf_ctrlr_keep_alive_poll, ctrlr,
217b8769cdbSJinYu 					   ctrlr->feat.keep_alive_timer.bits.kato * 1000);
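		/* SPDK_POLLER_REGISTER takes a period in microseconds, so kato (ms) * 1000 makes the
		 * poller fire roughly once per keep alive interval.
		 */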
218b8769cdbSJinYu 	}
219b8769cdbSJinYu }
220b8769cdbSJinYu 
221825c9890SJim Harris static void
222825c9890SJim Harris nvmf_qpair_set_ctrlr(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_ctrlr *ctrlr)
223825c9890SJim Harris {
224825c9890SJim Harris 	if (qpair->ctrlr != NULL) {
225825c9890SJim Harris 		/* Admin queues will call this function twice. */
226825c9890SJim Harris 		assert(qpair->ctrlr == ctrlr);
227825c9890SJim Harris 		return;
228825c9890SJim Harris 	}
229825c9890SJim Harris 
230825c9890SJim Harris 	qpair->ctrlr = ctrlr;
231ea1a6608SJim Harris 	spdk_trace_owner_append_description(qpair->trace_id,
232ea1a6608SJim Harris 					    spdk_nvmf_subsystem_get_nqn(ctrlr->subsys));
233825c9890SJim Harris }
234825c9890SJim Harris 
235b1a23196SJim Harris static int _retry_qid_check(void *ctx);
236b1a23196SJim Harris 
237b8769cdbSJinYu static void
238b54cbf1fSKonrad Sztyber nvmf_ctrlr_send_connect_rsp(void *ctx)
239b54cbf1fSKonrad Sztyber {
240b54cbf1fSKonrad Sztyber 	struct spdk_nvmf_request *req = ctx;
241b54cbf1fSKonrad Sztyber 	struct spdk_nvmf_qpair *qpair = req->qpair;
242b54cbf1fSKonrad Sztyber 	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
243b54cbf1fSKonrad Sztyber 	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
2440a6bb8caSKonrad Sztyber 	int rc;
245b54cbf1fSKonrad Sztyber 
246db221b40SKonrad Sztyber 	/* The qpair might have been disconnected in the meantime */
247db221b40SKonrad Sztyber 	assert(qpair->state == SPDK_NVMF_QPAIR_CONNECTING ||
248db221b40SKonrad Sztyber 	       qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING);
249db221b40SKonrad Sztyber 	if (qpair->state == SPDK_NVMF_QPAIR_CONNECTING) {
2500a6bb8caSKonrad Sztyber 		if (nvmf_subsystem_host_auth_required(ctrlr->subsys, ctrlr->hostnqn)) {
2510a6bb8caSKonrad Sztyber 			rc = nvmf_qpair_auth_init(qpair);
2520a6bb8caSKonrad Sztyber 			if (rc != 0) {
2530a6bb8caSKonrad Sztyber 				rsp->status.sct = SPDK_NVME_SCT_GENERIC;
2540a6bb8caSKonrad Sztyber 				rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2550a6bb8caSKonrad Sztyber 				spdk_nvmf_request_complete(req);
2560a6bb8caSKonrad Sztyber 				spdk_nvmf_qpair_disconnect(qpair);
2570a6bb8caSKonrad Sztyber 				return;
2580a6bb8caSKonrad Sztyber 			}
2590a6bb8caSKonrad Sztyber 			rsp->status_code_specific.success.authreq.atr = 1;
2600a6bb8caSKonrad Sztyber 			nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_AUTHENTICATING);
2610a6bb8caSKonrad Sztyber 		} else {
262db221b40SKonrad Sztyber 			nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ENABLED);
263db221b40SKonrad Sztyber 		}
2640a6bb8caSKonrad Sztyber 	}
265db221b40SKonrad Sztyber 
266b54cbf1fSKonrad Sztyber 	SPDK_DEBUGLOG(nvmf, "connect capsule response: cntlid = 0x%04x\n", ctrlr->cntlid);
267b54cbf1fSKonrad Sztyber 
268b54cbf1fSKonrad Sztyber 	assert(spdk_get_thread() == qpair->group->thread);
269b54cbf1fSKonrad Sztyber 	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
270b54cbf1fSKonrad Sztyber 	rsp->status_code_specific.success.cntlid = ctrlr->cntlid;
271b54cbf1fSKonrad Sztyber 	spdk_nvmf_request_complete(req);
272b54cbf1fSKonrad Sztyber }
273b54cbf1fSKonrad Sztyber 
274b54cbf1fSKonrad Sztyber static void
275b54cbf1fSKonrad Sztyber nvmf_ctrlr_add_qpair(struct spdk_nvmf_qpair *qpair,
2766d4c78eaSZiye Yang 		     struct spdk_nvmf_ctrlr *ctrlr,
2773741a852SJim Harris 		     struct spdk_nvmf_request *req)
2786d4c78eaSZiye Yang {
2793741a852SJim Harris 	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
2803741a852SJim Harris 
281cc2f6634SZiv Hirsch 	if (!ctrlr->admin_qpair) {
282cc2f6634SZiv Hirsch 		SPDK_ERRLOG("Inactive admin qpair\n");
283cc2f6634SZiv Hirsch 		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
284cc2f6634SZiv Hirsch 		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
285cc2f6634SZiv Hirsch 		qpair->connect_req = NULL;
286cc2f6634SZiv Hirsch 		qpair->ctrlr = NULL;
287cc2f6634SZiv Hirsch 		spdk_nvmf_request_complete(req);
288cc2f6634SZiv Hirsch 		return;
289cc2f6634SZiv Hirsch 	}
290cc2f6634SZiv Hirsch 
2914c4cba9aSZiye Yang 	assert(ctrlr->admin_qpair->group->thread == spdk_get_thread());
2924c4cba9aSZiye Yang 
293f08cea71SBen Walker 	if (spdk_bit_array_get(ctrlr->qpair_mask, qpair->qid)) {
294b1a23196SJim Harris 		if (qpair->connect_req != NULL) {
295c69768bdSMarcin Spiewak 			SPDK_ERRLOG("Got I/O connect with duplicate QID %u (cntlid:%u)\n",
296c69768bdSMarcin Spiewak 				    qpair->qid, ctrlr->cntlid);
297f08cea71SBen Walker 			rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
298f08cea71SBen Walker 			rsp->status.sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER;
299b1a23196SJim Harris 			qpair->connect_req = NULL;
300b1a23196SJim Harris 			qpair->ctrlr = NULL;
3013741a852SJim Harris 			spdk_nvmf_request_complete(req);
302b1a23196SJim Harris 		} else {
303c69768bdSMarcin Spiewak 			SPDK_WARNLOG("Duplicate QID detected (cntlid:%u, qid:%u), re-check in %dus\n",
304c69768bdSMarcin Spiewak 				     ctrlr->cntlid, qpair->qid, DUPLICATE_QID_RETRY_US);
305b1a23196SJim Harris 			qpair->connect_req = req;
306b1a23196SJim Harris 			/* Set qpair->ctrlr here so that we'll have it when the poller expires. */
307825c9890SJim Harris 			nvmf_qpair_set_ctrlr(qpair, ctrlr);
308b1a23196SJim Harris 			req->poller = SPDK_POLLER_REGISTER(_retry_qid_check, qpair,
309b1a23196SJim Harris 							   DUPLICATE_QID_RETRY_US);
310b1a23196SJim Harris 		}
311f08cea71SBen Walker 		return;
312f08cea71SBen Walker 	}
3134d58c132SBen Walker 
314414f91a0SAlex Michon 	qpair->connect_req = NULL;
315414f91a0SAlex Michon 
316b54cbf1fSKonrad Sztyber 	SPDK_DTRACE_PROBE4_TICKS(nvmf_ctrlr_add_qpair, qpair, qpair->qid, ctrlr->subsys->subnqn,
317b54cbf1fSKonrad Sztyber 				 ctrlr->hostnqn);
318825c9890SJim Harris 	nvmf_qpair_set_ctrlr(qpair, ctrlr);
319f08cea71SBen Walker 	spdk_bit_array_set(ctrlr->qpair_mask, qpair->qid);
320c69768bdSMarcin Spiewak 	SPDK_DEBUGLOG(nvmf, "qpair_mask set, qid %u\n", qpair->qid);
3216d4c78eaSZiye Yang 
322b54cbf1fSKonrad Sztyber 	spdk_thread_send_msg(qpair->group->thread, nvmf_ctrlr_send_connect_rsp, req);
3236d4c78eaSZiye Yang }
3246d4c78eaSZiye Yang 
325b1a23196SJim Harris static int
326b1a23196SJim Harris _retry_qid_check(void *ctx)
327b1a23196SJim Harris {
328b1a23196SJim Harris 	struct spdk_nvmf_qpair *qpair = ctx;
329b1a23196SJim Harris 	struct spdk_nvmf_request *req = qpair->connect_req;
330b1a23196SJim Harris 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
331b1a23196SJim Harris 
332b1a23196SJim Harris 	spdk_poller_unregister(&req->poller);
333d8804f6bSMarcin Spiewak 	SPDK_WARNLOG("Retrying to add qpair, qid:%d\n", qpair->qid);
334b54cbf1fSKonrad Sztyber 	nvmf_ctrlr_add_qpair(qpair, ctrlr, req);
335b1a23196SJim Harris 	return SPDK_POLLER_BUSY;
336b1a23196SJim Harris }
337b1a23196SJim Harris 
3386d4c78eaSZiye Yang static void
339198fd2ceSSeth Howell _nvmf_ctrlr_add_admin_qpair(void *ctx)
3407346be69SZiye Yang {
3417346be69SZiye Yang 	struct spdk_nvmf_request *req = ctx;
3427346be69SZiye Yang 	struct spdk_nvmf_qpair *qpair = req->qpair;
3437346be69SZiye Yang 	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
3447346be69SZiye Yang 
3457346be69SZiye Yang 	ctrlr->admin_qpair = qpair;
3463f2d21a0SJim Harris 	ctrlr->association_timeout = qpair->transport->opts.association_timeout;
347198fd2ceSSeth Howell 	nvmf_ctrlr_start_keep_alive_timer(ctrlr);
348b54cbf1fSKonrad Sztyber 	nvmf_ctrlr_add_qpair(qpair, ctrlr, req);
3497346be69SZiye Yang }
3507346be69SZiye Yang 
3517346be69SZiye Yang static void
352198fd2ceSSeth Howell _nvmf_subsystem_add_ctrlr(void *ctx)
3537346be69SZiye Yang {
3547346be69SZiye Yang 	struct spdk_nvmf_request *req = ctx;
3557346be69SZiye Yang 	struct spdk_nvmf_qpair *qpair = req->qpair;
3567346be69SZiye Yang 	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
3577346be69SZiye Yang 	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
3587346be69SZiye Yang 
3599cb21ad6SSeth Howell 	if (nvmf_subsystem_add_ctrlr(ctrlr->subsys, ctrlr)) {
3607346be69SZiye Yang 		SPDK_ERRLOG("Unable to add controller to subsystem\n");
361a3c9ab66SJinYu 		spdk_bit_array_free(&ctrlr->qpair_mask);
3627346be69SZiye Yang 		free(ctrlr);
3637346be69SZiye Yang 		qpair->ctrlr = NULL;
3647346be69SZiye Yang 		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
365db96437eSShuhei Matsumoto 		spdk_nvmf_request_complete(req);
3667346be69SZiye Yang 		return;
3677346be69SZiye Yang 	}
3687346be69SZiye Yang 
369198fd2ceSSeth Howell 	spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_add_admin_qpair, req);
3707346be69SZiye Yang }
3717346be69SZiye Yang 
372000e6f5bSJacek Kalwas static void
373000e6f5bSJacek Kalwas nvmf_ctrlr_cdata_init(struct spdk_nvmf_transport *transport, struct spdk_nvmf_subsystem *subsystem,
374000e6f5bSJacek Kalwas 		      struct spdk_nvmf_ctrlr_data *cdata)
375000e6f5bSJacek Kalwas {
376982c25feSChangpeng Liu 	cdata->aerl = SPDK_NVMF_MAX_ASYNC_EVENTS - 1;
377000e6f5bSJacek Kalwas 	cdata->kas = KAS_DEFAULT_VALUE;
378b023e638SChangpeng Liu 	cdata->vid = SPDK_PCI_VID_INTEL;
379b023e638SChangpeng Liu 	cdata->ssvid = SPDK_PCI_VID_INTEL;
380b023e638SChangpeng Liu 	/* INTEL OUI */
381b023e638SChangpeng Liu 	cdata->ieee[0] = 0xe4;
382b023e638SChangpeng Liu 	cdata->ieee[1] = 0xd2;
383b023e638SChangpeng Liu 	cdata->ieee[2] = 0x5c;
38416c65744SAlexis Lescouet 	cdata->oncs.compare = 1;
3857d19bf23SKonrad Sztyber 	cdata->oncs.dsm = 1;
3867d19bf23SKonrad Sztyber 	cdata->oncs.write_zeroes = 1;
38708f25fb9SChangpeng Liu 	cdata->oncs.reservations = 1;
38886136540SRui Chang 	cdata->oncs.copy = 1;
3897d19bf23SKonrad Sztyber 	cdata->fuses.compare_and_write = 1;
390000e6f5bSJacek Kalwas 	cdata->sgls.supported = 1;
391000e6f5bSJacek Kalwas 	cdata->sgls.keyed_sgl = 1;
392000e6f5bSJacek Kalwas 	cdata->sgls.sgl_offset = 1;
393000e6f5bSJacek Kalwas 	cdata->nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
394000e6f5bSJacek Kalwas 	cdata->nvmf_specific.ioccsz += transport->opts.in_capsule_data_size / 16;
395000e6f5bSJacek Kalwas 	cdata->nvmf_specific.iorcsz = sizeof(struct spdk_nvme_cpl) / 16;
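	/* IOCCSZ/IORCSZ are in 16-byte units: the 64-byte SQE contributes 4, so e.g. a 4096-byte
	 * in-capsule data size yields IOCCSZ = 4 + 256 = 260, and the 16-byte CQE gives IORCSZ = 1.
	 */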
396000e6f5bSJacek Kalwas 	cdata->nvmf_specific.icdoff = 0; /* offset starts directly after SQE */
397000e6f5bSJacek Kalwas 	cdata->nvmf_specific.ctrattr.ctrlr_model = SPDK_NVMF_CTRLR_MODEL_DYNAMIC;
398000e6f5bSJacek Kalwas 	cdata->nvmf_specific.msdbd = 1;
399000e6f5bSJacek Kalwas 
400000e6f5bSJacek Kalwas 	if (transport->ops->cdata_init) {
401000e6f5bSJacek Kalwas 		transport->ops->cdata_init(transport, subsystem, cdata);
402000e6f5bSJacek Kalwas 	}
403000e6f5bSJacek Kalwas }
404000e6f5bSJacek Kalwas 
405a36785dfSDennis Maisenbacher static bool
406239855acSKonrad Sztyber nvmf_subsystem_has_zns_iocs(struct spdk_nvmf_subsystem *subsystem)
407a36785dfSDennis Maisenbacher {
408a36785dfSDennis Maisenbacher 	struct spdk_nvmf_ns *ns;
409a36785dfSDennis Maisenbacher 	uint32_t i;
410a36785dfSDennis Maisenbacher 
411a36785dfSDennis Maisenbacher 	for (i = 0; i < subsystem->max_nsid; i++) {
412a36785dfSDennis Maisenbacher 		ns = subsystem->ns[i];
413a36785dfSDennis Maisenbacher 		if (ns && ns->bdev && spdk_bdev_is_zoned(ns->bdev)) {
414a36785dfSDennis Maisenbacher 			return true;
415a36785dfSDennis Maisenbacher 		}
416a36785dfSDennis Maisenbacher 	}
417a36785dfSDennis Maisenbacher 	return false;
418a36785dfSDennis Maisenbacher }
419a36785dfSDennis Maisenbacher 
420d37555b4SJonas Pfefferle static void
421d37555b4SJonas Pfefferle nvmf_ctrlr_init_visible_ns(struct spdk_nvmf_ctrlr *ctrlr)
422d37555b4SJonas Pfefferle {
423d37555b4SJonas Pfefferle 	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
424d37555b4SJonas Pfefferle 	struct spdk_nvmf_ns *ns;
425d37555b4SJonas Pfefferle 
426d37555b4SJonas Pfefferle 	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
427d37555b4SJonas Pfefferle 	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
428d37555b4SJonas Pfefferle 		if (ns->always_visible || nvmf_ns_find_host(ns, ctrlr->hostnqn) != NULL) {
429bfd014b5SKonrad Sztyber 			nvmf_ctrlr_ns_set_visible(ctrlr, ns->nsid, true);
430d37555b4SJonas Pfefferle 		}
431d37555b4SJonas Pfefferle 	}
432d37555b4SJonas Pfefferle }
433d37555b4SJonas Pfefferle 
4342d1a2926SDaniel Verkamp static struct spdk_nvmf_ctrlr *
435198fd2ceSSeth Howell nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
4367346be69SZiye Yang 		  struct spdk_nvmf_request *req,
4372d1a2926SDaniel Verkamp 		  struct spdk_nvmf_fabric_connect_cmd *connect_cmd,
4382d1a2926SDaniel Verkamp 		  struct spdk_nvmf_fabric_connect_data *connect_data)
43903788f93SBen Walker {
4402d1a2926SDaniel Verkamp 	struct spdk_nvmf_ctrlr *ctrlr;
4412a6c2c28SChangpeng Liu 	struct spdk_nvmf_transport *transport = req->qpair->transport;
4425c9a8a3cSShuhei Matsumoto 	struct spdk_nvme_transport_id listen_trid = {};
443a36785dfSDennis Maisenbacher 	bool subsys_has_multi_iocs = false;
44403788f93SBen Walker 
4452d1a2926SDaniel Verkamp 	ctrlr = calloc(1, sizeof(*ctrlr));
4462d1a2926SDaniel Verkamp 	if (ctrlr == NULL) {
4472d1a2926SDaniel Verkamp 		SPDK_ERRLOG("Memory allocation failed\n");
4482d1a2926SDaniel Verkamp 		return NULL;
4492d1a2926SDaniel Verkamp 	}
45003788f93SBen Walker 
4512a6c2c28SChangpeng Liu 	if (spdk_nvme_trtype_is_fabrics(transport->ops->type)) {
4522a6c2c28SChangpeng Liu 		ctrlr->dynamic_ctrlr = true;
4532a6c2c28SChangpeng Liu 	} else {
4542a6c2c28SChangpeng Liu 		ctrlr->cntlid = connect_data->cntlid;
4552a6c2c28SChangpeng Liu 	}
4562a6c2c28SChangpeng Liu 
4577c30df4eSJim Harris 	SPDK_DTRACE_PROBE3_TICKS(nvmf_ctrlr_create, ctrlr, subsystem->subnqn,
458d11601e8SKrzysztof Karas 				 spdk_thread_get_id(req->qpair->group->thread));
459d11601e8SKrzysztof Karas 
4608cd9ef28SJiewei Ke 	STAILQ_INIT(&ctrlr->async_events);
46178bfb2a1SChangpeng Liu 	TAILQ_INIT(&ctrlr->log_head);
4622d1a2926SDaniel Verkamp 	ctrlr->subsys = subsystem;
463008ec0bdSBen Walker 	ctrlr->thread = req->qpair->group->thread;
4648689f6a3SJim Harris 	ctrlr->disconnect_in_progress = false;
465f08cea71SBen Walker 
4668e808490SJohn Barnard 	ctrlr->qpair_mask = spdk_bit_array_create(transport->opts.max_qpairs_per_ctrlr);
467f08cea71SBen Walker 	if (!ctrlr->qpair_mask) {
468f08cea71SBen Walker 		SPDK_ERRLOG("Failed to allocate controller qpair mask\n");
46919faf912SJonas Pfefferle 		goto err_qpair_mask;
470f08cea71SBen Walker 	}
4712d1a2926SDaniel Verkamp 
472000e6f5bSJacek Kalwas 	nvmf_ctrlr_cdata_init(transport, subsystem, &ctrlr->cdata);
473000e6f5bSJacek Kalwas 
474b8769cdbSJinYu 	/*
475f74fdce9SJacek Kalwas 	 * KAS: This field indicates the granularity of the Keep Alive Timer in 100ms units.
476f74fdce9SJacek Kalwas 	 * If this field is cleared to 0h, then Keep Alive is not supported.
477b8769cdbSJinYu 	 */
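	/* e.g. with the 10000 ms granularity defined by the macros above, a Connect KATO of
	 * 15000 ms is rounded up to 20000 ms by the computation below.
	 */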
478000e6f5bSJacek Kalwas 	if (ctrlr->cdata.kas) {
479b8769cdbSJinYu 		ctrlr->feat.keep_alive_timer.bits.kato = spdk_divide_round_up(connect_cmd->kato,
480b8769cdbSJinYu 				KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS) *
481b8769cdbSJinYu 				KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS;
482f74fdce9SJacek Kalwas 	}
483f74fdce9SJacek Kalwas 
484763ab888SChangpeng Liu 	ctrlr->feat.async_event_configuration.bits.ns_attr_notice = 1;
4855e4e4bc4SBen Walker 	if (ctrlr->subsys->flags.ana_reporting) {
486523f2a85SShuhei Matsumoto 		ctrlr->feat.async_event_configuration.bits.ana_change_notice = 1;
487523f2a85SShuhei Matsumoto 	}
4880f561837SDaniel Verkamp 	ctrlr->feat.volatile_write_cache.bits.wce = 1;
4893eed8456SChangpeng Liu 	/* Coalescing Disable */
4903eed8456SChangpeng Liu 	ctrlr->feat.interrupt_vector_configuration.bits.cd = 1;
4910f561837SDaniel Verkamp 
4927efdf905SSlawomir Ptak 	if (spdk_nvmf_subsystem_is_discovery(ctrlr->subsys)) {
493b8769cdbSJinYu 		/*
494e37fc5a3SShuhei Matsumoto 		 * If the keep-alive timeout is not set, discovery controllers use an
495e37fc5a3SShuhei Matsumoto 		 * arbitrarily high value in order to clean up stale discovery sessions.
496b8769cdbSJinYu 		 *
497b8769cdbSJinYu 		 * From the 1.0a nvme-of spec:
498b8769cdbSJinYu 		 * "The Keep Alive command is reserved for
499b8769cdbSJinYu 		 * Discovery controllers. A transport may specify a
500b8769cdbSJinYu 		 * fixed Discovery controller activity timeout value
501b8769cdbSJinYu 		 * (e.g., 2 minutes). If no commands are received
502b8769cdbSJinYu 		 * by a Discovery controller within that time
503b8769cdbSJinYu 		 * period, the controller may perform the
504b8769cdbSJinYu 		 * actions for Keep Alive Timer expiration".
505fce94287SMadhu Adav MJ 		 *
506fce94287SMadhu Adav MJ 		 * From the 1.1 nvme-of spec:
507fce94287SMadhu Adav MJ 		 * "A host requests an explicit persistent connection
508fce94287SMadhu Adav MJ 		 * to a Discovery controller and Asynchronous Event Notifications from
509fce94287SMadhu Adav MJ 		 * the Discovery controller on that persistent connection by specifying
510fce94287SMadhu Adav MJ 		 * a non-zero Keep Alive Timer value in the Connect command."
511fce94287SMadhu Adav MJ 		 *
512fce94287SMadhu Adav MJ 		 * In case non-zero KATO is used, we enable discovery_log_change_notice
513fce94287SMadhu Adav MJ 		 * otherwise we disable it and use default discovery controller KATO.
514fce94287SMadhu Adav MJ 		 * KATO is in millisecond.
515b8769cdbSJinYu 		 */
516e37fc5a3SShuhei Matsumoto 		if (ctrlr->feat.keep_alive_timer.bits.kato == 0) {
517b8769cdbSJinYu 			ctrlr->feat.keep_alive_timer.bits.kato = NVMF_DISC_KATO_IN_MS;
518fce94287SMadhu Adav MJ 			ctrlr->feat.async_event_configuration.bits.discovery_log_change_notice = 0;
519fce94287SMadhu Adav MJ 		} else {
520fce94287SMadhu Adav MJ 			ctrlr->feat.async_event_configuration.bits.discovery_log_change_notice = 1;
521b8769cdbSJinYu 		}
522e37fc5a3SShuhei Matsumoto 	}
523b8769cdbSJinYu 
5240f561837SDaniel Verkamp 	/* Subtract 1 for the admin queue and 1 because the value is 0-based */
5258e808490SJohn Barnard 	ctrlr->feat.number_of_queues.bits.ncqr = transport->opts.max_qpairs_per_ctrlr - 1 -
5268e808490SJohn Barnard 			1;
5278e808490SJohn Barnard 	ctrlr->feat.number_of_queues.bits.nsqr = transport->opts.max_qpairs_per_ctrlr - 1 -
5288e808490SJohn Barnard 			1;
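	/* e.g. max_qpairs_per_ctrlr = 128 allows 1 admin + 127 I/O queue pairs and is reported
	 * as NCQR = NSQR = 126 (0-based).
	 */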
5290f561837SDaniel Verkamp 
530187e2dfbSChangpeng Liu 	spdk_uuid_copy(&ctrlr->hostid, (struct spdk_uuid *)connect_data->hostid);
531d1698dcaSMarcin Spiewak 	memcpy(ctrlr->hostnqn, connect_data->hostnqn, SPDK_NVMF_NQN_MAX_LEN);
5322d1a2926SDaniel Verkamp 
533d37555b4SJonas Pfefferle 	ctrlr->visible_ns = spdk_bit_array_create(subsystem->max_nsid);
534d37555b4SJonas Pfefferle 	if (!ctrlr->visible_ns) {
535d37555b4SJonas Pfefferle 		SPDK_ERRLOG("Failed to allocate visible namespace array\n");
536d37555b4SJonas Pfefferle 		goto err_visible_ns;
537d37555b4SJonas Pfefferle 	}
538d37555b4SJonas Pfefferle 	nvmf_ctrlr_init_visible_ns(ctrlr);
539d37555b4SJonas Pfefferle 
54003788f93SBen Walker 	ctrlr->vcprop.cap.raw = 0;
54103788f93SBen Walker 	ctrlr->vcprop.cap.bits.cqr = 1; /* NVMe-oF specification required */
5428e808490SJohn Barnard 	ctrlr->vcprop.cap.bits.mqes = transport->opts.max_queue_depth -
5438e808490SJohn Barnard 				      1; /* max queue depth */
54403788f93SBen Walker 	ctrlr->vcprop.cap.bits.ams = 0; /* optional arb mechanisms */
545f63c0899SChangpeng Liu 	/* ready timeout - 500 msec units */
546f63c0899SChangpeng Liu 	ctrlr->vcprop.cap.bits.to = NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500;
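	/* With the macros above this is 15000 ms / 500 ms = 30, i.e. CAP.TO advertises a
	 * 15 second ready timeout.
	 */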
54703788f93SBen Walker 	ctrlr->vcprop.cap.bits.dstrd = 0; /* fixed to 0 for NVMe-oF */
548239855acSKonrad Sztyber 	subsys_has_multi_iocs = nvmf_subsystem_has_zns_iocs(subsystem);
549a36785dfSDennis Maisenbacher 	if (subsys_has_multi_iocs) {
550a36785dfSDennis Maisenbacher 		ctrlr->vcprop.cap.bits.css =
551a36785dfSDennis Maisenbacher 			SPDK_NVME_CAP_CSS_IOCS; /* One or more I/O command sets supported */
552a36785dfSDennis Maisenbacher 	} else {
55399c8c6d8SBen Walker 		ctrlr->vcprop.cap.bits.css = SPDK_NVME_CAP_CSS_NVM; /* NVM command set */
554a36785dfSDennis Maisenbacher 	}
555a36785dfSDennis Maisenbacher 
55603788f93SBen Walker 	ctrlr->vcprop.cap.bits.mpsmin = 0; /* 2 ^ (12 + mpsmin) == 4k */
55703788f93SBen Walker 	ctrlr->vcprop.cap.bits.mpsmax = 0; /* 2 ^ (12 + mpsmax) == 4k */
55803788f93SBen Walker 
559b86ae853SDaniel Verkamp 	/* Version Supported: 1.3 */
56003788f93SBen Walker 	ctrlr->vcprop.vs.bits.mjr = 1;
561b86ae853SDaniel Verkamp 	ctrlr->vcprop.vs.bits.mnr = 3;
562b86ae853SDaniel Verkamp 	ctrlr->vcprop.vs.bits.ter = 0;
56303788f93SBen Walker 
56403788f93SBen Walker 	ctrlr->vcprop.cc.raw = 0;
5652d1a2926SDaniel Verkamp 	ctrlr->vcprop.cc.bits.en = 0; /* Init controller disabled */
566a36785dfSDennis Maisenbacher 	if (subsys_has_multi_iocs) {
567a36785dfSDennis Maisenbacher 		ctrlr->vcprop.cc.bits.css =
568a36785dfSDennis Maisenbacher 			SPDK_NVME_CC_CSS_IOCS; /* All supported I/O Command Sets */
569a36785dfSDennis Maisenbacher 	}
57003788f93SBen Walker 
57103788f93SBen Walker 	ctrlr->vcprop.csts.raw = 0;
57203788f93SBen Walker 	ctrlr->vcprop.csts.bits.rdy = 0; /* Init controller as not ready */
57303788f93SBen Walker 
5742172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "cap 0x%" PRIx64 "\n", ctrlr->vcprop.cap.raw);
5752172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "vs 0x%x\n", ctrlr->vcprop.vs.raw);
5762172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "cc 0x%x\n", ctrlr->vcprop.cc.raw);
5772172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "csts 0x%x\n", ctrlr->vcprop.csts.raw);
5782d1a2926SDaniel Verkamp 
57991da9aaaSShuhei Matsumoto 	ctrlr->dif_insert_or_strip = transport->opts.dif_insert_or_strip;
58091da9aaaSShuhei Matsumoto 
5817bcff376SShuhei Matsumoto 	if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_NVME) {
5825c9a8a3cSShuhei Matsumoto 		if (spdk_nvmf_qpair_get_listen_trid(req->qpair, &listen_trid) != 0) {
5835c9a8a3cSShuhei Matsumoto 			SPDK_ERRLOG("Could not get listener transport ID\n");
58419faf912SJonas Pfefferle 			goto err_listener;
5855c9a8a3cSShuhei Matsumoto 		}
5865c9a8a3cSShuhei Matsumoto 
5875c9a8a3cSShuhei Matsumoto 		ctrlr->listener = nvmf_subsystem_find_listener(ctrlr->subsys, &listen_trid);
5887bcff376SShuhei Matsumoto 		if (!ctrlr->listener) {
5897bcff376SShuhei Matsumoto 			SPDK_ERRLOG("Listener was not found\n");
59019faf912SJonas Pfefferle 			goto err_listener;
5917bcff376SShuhei Matsumoto 		}
5927bcff376SShuhei Matsumoto 	}
5937bcff376SShuhei Matsumoto 
594825c9890SJim Harris 	nvmf_qpair_set_ctrlr(req->qpair, ctrlr);
595198fd2ceSSeth Howell 	spdk_thread_send_msg(subsystem->thread, _nvmf_subsystem_add_ctrlr, req);
596f4a4ddd8SDaniel Verkamp 
5972d1a2926SDaniel Verkamp 	return ctrlr;
59819faf912SJonas Pfefferle err_listener:
599d37555b4SJonas Pfefferle 	spdk_bit_array_free(&ctrlr->visible_ns);
600d37555b4SJonas Pfefferle err_visible_ns:
60119faf912SJonas Pfefferle 	spdk_bit_array_free(&ctrlr->qpair_mask);
60219faf912SJonas Pfefferle err_qpair_mask:
60319faf912SJonas Pfefferle 	free(ctrlr);
60419faf912SJonas Pfefferle 	return NULL;
60503788f93SBen Walker }
60603788f93SBen Walker 
607b8769cdbSJinYu static void
608198fd2ceSSeth Howell _nvmf_ctrlr_destruct(void *ctx)
609b8769cdbSJinYu {
610b8769cdbSJinYu 	struct spdk_nvmf_ctrlr *ctrlr = ctx;
61178bfb2a1SChangpeng Liu 	struct spdk_nvmf_reservation_log *log, *log_tmp;
6128cd9ef28SJiewei Ke 	struct spdk_nvmf_async_event_completion *event, *event_tmp;
613b8769cdbSJinYu 
6147c30df4eSJim Harris 	SPDK_DTRACE_PROBE3_TICKS(nvmf_ctrlr_destruct, ctrlr, ctrlr->subsys->subnqn,
615d11601e8SKrzysztof Karas 				 spdk_thread_get_id(ctrlr->thread));
616d11601e8SKrzysztof Karas 
61797385af1SAlexey Marchuk 	assert(spdk_get_thread() == ctrlr->thread);
61897385af1SAlexey Marchuk 	assert(ctrlr->in_destruct);
61997385af1SAlexey Marchuk 
620478f6524SAlexey Marchuk 	SPDK_DEBUGLOG(nvmf, "Destroy ctrlr 0x%hx\n", ctrlr->cntlid);
6218689f6a3SJim Harris 	if (ctrlr->disconnect_in_progress) {
6228689f6a3SJim Harris 		SPDK_ERRLOG("freeing ctrlr with disconnect in progress\n");
6238689f6a3SJim Harris 		spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_destruct, ctrlr);
6248689f6a3SJim Harris 		return;
6258689f6a3SJim Harris 	}
6268689f6a3SJim Harris 
627198fd2ceSSeth Howell 	nvmf_ctrlr_stop_keep_alive_timer(ctrlr);
62871cd42e1SJacek Kalwas 	nvmf_ctrlr_stop_association_timer(ctrlr);
629d156d2f7SSeth Howell 	spdk_bit_array_free(&ctrlr->qpair_mask);
63071cd42e1SJacek Kalwas 
63178bfb2a1SChangpeng Liu 	TAILQ_FOREACH_SAFE(log, &ctrlr->log_head, link, log_tmp) {
63278bfb2a1SChangpeng Liu 		TAILQ_REMOVE(&ctrlr->log_head, log, link);
63378bfb2a1SChangpeng Liu 		free(log);
63478bfb2a1SChangpeng Liu 	}
6358cd9ef28SJiewei Ke 	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
6368cd9ef28SJiewei Ke 		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
6378cd9ef28SJiewei Ke 		free(event);
6388cd9ef28SJiewei Ke 	}
639d37555b4SJonas Pfefferle 	spdk_bit_array_free(&ctrlr->visible_ns);
640b8769cdbSJinYu 	free(ctrlr);
641b8769cdbSJinYu }
642b8769cdbSJinYu 
64303788f93SBen Walker void
6449cb21ad6SSeth Howell nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
64503788f93SBen Walker {
6469cb21ad6SSeth Howell 	nvmf_subsystem_remove_ctrlr(ctrlr->subsys, ctrlr);
6474a8b3adbSBen Walker 
648198fd2ceSSeth Howell 	spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_destruct, ctrlr);
64903788f93SBen Walker }
65003788f93SBen Walker 
65130ef8cacSZiye Yang static void
652198fd2ceSSeth Howell nvmf_ctrlr_add_io_qpair(void *ctx)
653114a91fdSZiye Yang {
654114a91fdSZiye Yang 	struct spdk_nvmf_request *req = ctx;
655114a91fdSZiye Yang 	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
656114a91fdSZiye Yang 	struct spdk_nvmf_qpair *qpair = req->qpair;
657114a91fdSZiye Yang 	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
65892f62deeSAlexey Marchuk 	struct spdk_nvmf_qpair *admin_qpair = ctrlr->admin_qpair;
659cc2f6634SZiv Hirsch 	struct spdk_nvmf_poll_group *admin_qpair_group = NULL;
660cc2f6634SZiv Hirsch 	enum spdk_nvmf_qpair_state admin_qpair_state = SPDK_NVMF_QPAIR_UNINITIALIZED;
6613caf2080SKonrad Sztyber 	bool admin_qpair_active = false;
662114a91fdSZiye Yang 
6637c30df4eSJim Harris 	SPDK_DTRACE_PROBE4_TICKS(nvmf_ctrlr_add_io_qpair, ctrlr, req->qpair, req->qpair->qid,
664d11601e8SKrzysztof Karas 				 spdk_thread_get_id(ctrlr->thread));
665d11601e8SKrzysztof Karas 
666114a91fdSZiye Yang 	/* The unit tests check qpair->ctrlr after calling spdk_nvmf_ctrlr_connect.
667114a91fdSZiye Yang 	 * In the error case the value should be NULL, so set it to NULL up front.
668114a91fdSZiye Yang 	 */
669114a91fdSZiye Yang 	qpair->ctrlr = NULL;
670114a91fdSZiye Yang 
6710162da7fSSeth Howell 	/* Make sure the controller is not being destroyed. */
6720162da7fSSeth Howell 	if (ctrlr->in_destruct) {
6730162da7fSSeth Howell 		SPDK_ERRLOG("Got I/O connect while ctrlr was being destroyed.\n");
6740162da7fSSeth Howell 		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
6750162da7fSSeth Howell 		goto end;
6760162da7fSSeth Howell 	}
6770162da7fSSeth Howell 
6787efdf905SSlawomir Ptak 	if (spdk_nvmf_subsystem_is_discovery(ctrlr->subsys)) {
679114a91fdSZiye Yang 		SPDK_ERRLOG("I/O connect not allowed on discovery controller\n");
680114a91fdSZiye Yang 		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
68130ef8cacSZiye Yang 		goto end;
682114a91fdSZiye Yang 	}
683114a91fdSZiye Yang 
684114a91fdSZiye Yang 	if (!ctrlr->vcprop.cc.bits.en) {
685114a91fdSZiye Yang 		SPDK_ERRLOG("Got I/O connect before ctrlr was enabled\n");
686114a91fdSZiye Yang 		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
68730ef8cacSZiye Yang 		goto end;
688114a91fdSZiye Yang 	}
689114a91fdSZiye Yang 
690114a91fdSZiye Yang 	if (1u << ctrlr->vcprop.cc.bits.iosqes != sizeof(struct spdk_nvme_cmd)) {
691114a91fdSZiye Yang 		SPDK_ERRLOG("Got I/O connect with invalid IOSQES %u\n",
692114a91fdSZiye Yang 			    ctrlr->vcprop.cc.bits.iosqes);
693114a91fdSZiye Yang 		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
69430ef8cacSZiye Yang 		goto end;
695114a91fdSZiye Yang 	}
696114a91fdSZiye Yang 
697114a91fdSZiye Yang 	if (1u << ctrlr->vcprop.cc.bits.iocqes != sizeof(struct spdk_nvme_cpl)) {
698114a91fdSZiye Yang 		SPDK_ERRLOG("Got I/O connect with invalid IOCQES %u\n",
699114a91fdSZiye Yang 			    ctrlr->vcprop.cc.bits.iocqes);
700114a91fdSZiye Yang 		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
70130ef8cacSZiye Yang 		goto end;
702114a91fdSZiye Yang 	}
703114a91fdSZiye Yang 
704cc2f6634SZiv Hirsch 	/* There is a chance that the admin qpair was destroyed. This is an issue that was observed only with ESX initiators */
705cc2f6634SZiv Hirsch 	if (admin_qpair) {
7063caf2080SKonrad Sztyber 		admin_qpair_active = spdk_nvmf_qpair_is_active(admin_qpair);
707cc2f6634SZiv Hirsch 		admin_qpair_group = admin_qpair->group;
708cc2f6634SZiv Hirsch 		admin_qpair_state = admin_qpair->state;
709cc2f6634SZiv Hirsch 	}
710cc2f6634SZiv Hirsch 
7113caf2080SKonrad Sztyber 	if (!admin_qpair_active || admin_qpair_group == NULL) {
712cc2f6634SZiv Hirsch 		/* There is a chance that the admin qpair was destroyed or is being destroyed at this moment,
71392f62deeSAlexey Marchuk 		 * e.g. due to an expired keep alive timer. Part of the qpair destruction process is changing
71492f62deeSAlexey Marchuk 		 * the qpair's state to DEACTIVATING and removing it from its poll group */
715cc2f6634SZiv Hirsch 		SPDK_ERRLOG("Inactive admin qpair (state %d, group %p)\n", admin_qpair_state, admin_qpair_group);
71692f62deeSAlexey Marchuk 		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
71792f62deeSAlexey Marchuk 		goto end;
71892f62deeSAlexey Marchuk 	}
71992f62deeSAlexey Marchuk 
7203b54b0d7SJim Harris 	/* check if we would exceed ctrlr connection limit */
7213b54b0d7SJim Harris 	if (qpair->qid >= spdk_bit_array_capacity(ctrlr->qpair_mask)) {
7223b54b0d7SJim Harris 		SPDK_ERRLOG("Requested QID %u but Max QID is %u\n",
7233b54b0d7SJim Harris 			    qpair->qid, spdk_bit_array_capacity(ctrlr->qpair_mask) - 1);
7243b54b0d7SJim Harris 		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
7253b54b0d7SJim Harris 		rsp->status.sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER;
7263b54b0d7SJim Harris 		goto end;
7273b54b0d7SJim Harris 	}
7283b54b0d7SJim Harris 
729b54cbf1fSKonrad Sztyber 	nvmf_ctrlr_add_qpair(qpair, ctrlr, req);
7303741a852SJim Harris 	return;
73130ef8cacSZiye Yang end:
732db96437eSShuhei Matsumoto 	spdk_nvmf_request_complete(req);
733114a91fdSZiye Yang }
734114a91fdSZiye Yang 
735114a91fdSZiye Yang static void
736198fd2ceSSeth Howell _nvmf_ctrlr_add_io_qpair(void *ctx)
7377346be69SZiye Yang {
7387346be69SZiye Yang 	struct spdk_nvmf_request *req = ctx;
7397346be69SZiye Yang 	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
74049c0d28aSJohn Levon 	struct spdk_nvmf_fabric_connect_data *data;
7417346be69SZiye Yang 	struct spdk_nvmf_ctrlr *ctrlr;
7427346be69SZiye Yang 	struct spdk_nvmf_qpair *qpair = req->qpair;
7437346be69SZiye Yang 	struct spdk_nvmf_qpair *admin_qpair;
7447346be69SZiye Yang 	struct spdk_nvmf_tgt *tgt = qpair->transport->tgt;
7457346be69SZiye Yang 	struct spdk_nvmf_subsystem *subsystem;
7465c9a8a3cSShuhei Matsumoto 	struct spdk_nvme_transport_id listen_trid = {};
7475b27db62SShuhei Matsumoto 	const struct spdk_nvmf_subsystem_listener *listener;
748cc2f6634SZiv Hirsch 	struct spdk_nvmf_poll_group *admin_qpair_group = NULL;
749cc2f6634SZiv Hirsch 	enum spdk_nvmf_qpair_state admin_qpair_state = SPDK_NVMF_QPAIR_UNINITIALIZED;
7503caf2080SKonrad Sztyber 	bool admin_qpair_active = false;
7517346be69SZiye Yang 
75249c0d28aSJohn Levon 	assert(req->iovcnt == 1);
75349c0d28aSJohn Levon 
75449c0d28aSJohn Levon 	data = req->iov[0].iov_base;
75549c0d28aSJohn Levon 
7562172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Connect I/O Queue for controller id 0x%x\n", data->cntlid);
7577346be69SZiye Yang 
7587346be69SZiye Yang 	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, data->subnqn);
7597346be69SZiye Yang 	/* We already checked this in spdk_nvmf_ctrlr_connect */
7607346be69SZiye Yang 	assert(subsystem != NULL);
7617346be69SZiye Yang 
7629cb21ad6SSeth Howell 	ctrlr = nvmf_subsystem_get_ctrlr(subsystem, data->cntlid);
7637346be69SZiye Yang 	if (ctrlr == NULL) {
7647346be69SZiye Yang 		SPDK_ERRLOG("Unknown controller ID 0x%x\n", data->cntlid);
7657346be69SZiye Yang 		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, cntlid);
766db96437eSShuhei Matsumoto 		spdk_nvmf_request_complete(req);
7677346be69SZiye Yang 		return;
7687346be69SZiye Yang 	}
7697346be69SZiye Yang 
7700162da7fSSeth Howell 	/* fail before passing a message to the controller thread. */
7710162da7fSSeth Howell 	if (ctrlr->in_destruct) {
7720162da7fSSeth Howell 		SPDK_ERRLOG("Got I/O connect while ctrlr was being destroyed.\n");
7730162da7fSSeth Howell 		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
7740162da7fSSeth Howell 		spdk_nvmf_request_complete(req);
7750162da7fSSeth Howell 		return;
7760162da7fSSeth Howell 	}
7770162da7fSSeth Howell 
7785b27db62SShuhei Matsumoto 	/* If ANA reporting is enabled, check if I/O connect is on the same listener. */
7795e4e4bc4SBen Walker 	if (subsystem->flags.ana_reporting) {
7805c9a8a3cSShuhei Matsumoto 		if (spdk_nvmf_qpair_get_listen_trid(req->qpair, &listen_trid) != 0) {
7815c9a8a3cSShuhei Matsumoto 			SPDK_ERRLOG("Could not get listener transport ID\n");
7825c9a8a3cSShuhei Matsumoto 			SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
7835c9a8a3cSShuhei Matsumoto 			spdk_nvmf_request_complete(req);
7845c9a8a3cSShuhei Matsumoto 			return;
7855c9a8a3cSShuhei Matsumoto 		}
7865c9a8a3cSShuhei Matsumoto 
7875c9a8a3cSShuhei Matsumoto 		listener = nvmf_subsystem_find_listener(subsystem, &listen_trid);
7885b27db62SShuhei Matsumoto 		if (listener != ctrlr->listener) {
7895b27db62SShuhei Matsumoto 			SPDK_ERRLOG("I/O connect is on a listener different from admin connect\n");
7905b27db62SShuhei Matsumoto 			SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
7915b27db62SShuhei Matsumoto 			spdk_nvmf_request_complete(req);
7925b27db62SShuhei Matsumoto 			return;
7935b27db62SShuhei Matsumoto 		}
7945b27db62SShuhei Matsumoto 	}
7955b27db62SShuhei Matsumoto 
7967346be69SZiye Yang 	admin_qpair = ctrlr->admin_qpair;
797cc2f6634SZiv Hirsch 
798cc2f6634SZiv Hirsch 	/* There is a chance that the admin qpair was destroyed. This is an issue that was observed only with ESX initiators */
799cc2f6634SZiv Hirsch 	if (admin_qpair) {
8003caf2080SKonrad Sztyber 		admin_qpair_active = spdk_nvmf_qpair_is_active(admin_qpair);
801cc2f6634SZiv Hirsch 		admin_qpair_group = admin_qpair->group;
802cc2f6634SZiv Hirsch 		admin_qpair_state = admin_qpair->state;
803cc2f6634SZiv Hirsch 	}
804cc2f6634SZiv Hirsch 
8053caf2080SKonrad Sztyber 	if (!admin_qpair_active || admin_qpair_group == NULL) {
806cc2f6634SZiv Hirsch 		/* There is a chance that the admin qpair was destroyed or is being destroyed at this moment,
807813869d8SAlexey Marchuk 		 * e.g. due to an expired keep alive timer. Part of the qpair destruction process is changing
808813869d8SAlexey Marchuk 		 * the qpair's state to DEACTIVATING and removing it from its poll group */
809cc2f6634SZiv Hirsch 		SPDK_ERRLOG("Inactive admin qpair (state %d, group %p)\n", admin_qpair_state, admin_qpair_group);
810813869d8SAlexey Marchuk 		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
811813869d8SAlexey Marchuk 		spdk_nvmf_request_complete(req);
812813869d8SAlexey Marchuk 		return;
813813869d8SAlexey Marchuk 	}
8147346be69SZiye Yang 	qpair->ctrlr = ctrlr;
815cc2f6634SZiv Hirsch 	spdk_thread_send_msg(admin_qpair_group->thread, nvmf_ctrlr_add_io_qpair, req);
8167346be69SZiye Yang }
8177346be69SZiye Yang 
818489815dcSJacek Kalwas static bool
819198fd2ceSSeth Howell nvmf_qpair_access_allowed(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_subsystem *subsystem,
820489815dcSJacek Kalwas 			  const char *hostnqn)
821489815dcSJacek Kalwas {
822489815dcSJacek Kalwas 	struct spdk_nvme_transport_id listen_trid = {};
823489815dcSJacek Kalwas 
824489815dcSJacek Kalwas 	if (!spdk_nvmf_subsystem_host_allowed(subsystem, hostnqn)) {
825489815dcSJacek Kalwas 		SPDK_ERRLOG("Subsystem '%s' does not allow host '%s'\n", subsystem->subnqn, hostnqn);
826489815dcSJacek Kalwas 		return false;
827489815dcSJacek Kalwas 	}
828489815dcSJacek Kalwas 
829489815dcSJacek Kalwas 	if (spdk_nvmf_qpair_get_listen_trid(qpair, &listen_trid)) {
830489815dcSJacek Kalwas 		SPDK_ERRLOG("Subsystem '%s' is unable to enforce access control due to an internal error.\n",
831489815dcSJacek Kalwas 			    subsystem->subnqn);
832489815dcSJacek Kalwas 		return false;
833489815dcSJacek Kalwas 	}
834489815dcSJacek Kalwas 
835489815dcSJacek Kalwas 	if (!spdk_nvmf_subsystem_listener_allowed(subsystem, &listen_trid)) {
836489815dcSJacek Kalwas 		SPDK_ERRLOG("Subsystem '%s' does not allow host '%s' to connect at this address.\n",
837489815dcSJacek Kalwas 			    subsystem->subnqn, hostnqn);
838489815dcSJacek Kalwas 		return false;
839489815dcSJacek Kalwas 	}
840489815dcSJacek Kalwas 
841489815dcSJacek Kalwas 	return true;
842489815dcSJacek Kalwas }
843489815dcSJacek Kalwas 
8445323a026SDaniel Verkamp static int
845198fd2ceSSeth Howell _nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
8464ff0eba8SDaniel Verkamp {
84749c0d28aSJohn Levon 	struct spdk_nvmf_fabric_connect_data *data = req->iov[0].iov_base;
8484ff0eba8SDaniel Verkamp 	struct spdk_nvmf_fabric_connect_cmd *cmd = &req->cmd->connect_cmd;
8494ff0eba8SDaniel Verkamp 	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
8504ff0eba8SDaniel Verkamp 	struct spdk_nvmf_qpair *qpair = req->qpair;
85120618744SAnil Veerabhadrappa 	struct spdk_nvmf_transport *transport = qpair->transport;
85203788f93SBen Walker 	struct spdk_nvmf_ctrlr *ctrlr;
85303788f93SBen Walker 	struct spdk_nvmf_subsystem *subsystem;
8544ff0eba8SDaniel Verkamp 
8552172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "recfmt 0x%x qid %u sqsize %u\n",
85603788f93SBen Walker 		      cmd->recfmt, cmd->qid, cmd->sqsize);
85703788f93SBen Walker 
8582172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Connect data:\n");
8592172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "  cntlid:  0x%04x\n", data->cntlid);
8602172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "  hostid: %08x-%04x-%04x-%02x%02x-%04x%08x ***\n",
86103788f93SBen Walker 		      ntohl(*(uint32_t *)&data->hostid[0]),
86203788f93SBen Walker 		      ntohs(*(uint16_t *)&data->hostid[4]),
86303788f93SBen Walker 		      ntohs(*(uint16_t *)&data->hostid[6]),
86403788f93SBen Walker 		      data->hostid[8],
86503788f93SBen Walker 		      data->hostid[9],
86603788f93SBen Walker 		      ntohs(*(uint16_t *)&data->hostid[10]),
86703788f93SBen Walker 		      ntohl(*(uint32_t *)&data->hostid[12]));
8682172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "  subnqn: \"%s\"\n", data->subnqn);
8692172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "  hostnqn: \"%s\"\n", data->hostnqn);
8700ab300f8SBen Walker 
871489815dcSJacek Kalwas 	subsystem = spdk_nvmf_tgt_find_subsystem(transport->tgt, data->subnqn);
872489815dcSJacek Kalwas 	if (!subsystem) {
8734ff0eba8SDaniel Verkamp 		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn);
8745323a026SDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
8754ff0eba8SDaniel Verkamp 	}
8764ff0eba8SDaniel Verkamp 
877fcca8ea6SJacek Kalwas 	if (cmd->recfmt != 0) {
878fcca8ea6SJacek Kalwas 		SPDK_ERRLOG("Connect command unsupported RECFMT %u\n", cmd->recfmt);
879f3109678SZiye Yang 		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
880fcca8ea6SJacek Kalwas 		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT;
8816dbcb893SBen Walker 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
8826dbcb893SBen Walker 	}
8836dbcb893SBen Walker 
88403788f93SBen Walker 	/*
88503788f93SBen Walker 	 * SQSIZE is a 0-based value, so it must be at least 1 (minimum queue depth is 2) and
88620618744SAnil Veerabhadrappa 	 * strictly less than max_aq_depth (admin queues) or max_queue_depth (io queues).
88703788f93SBen Walker 	 */
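	/* e.g. sqsize = 127 requests a 128-entry queue and is accepted only if the corresponding
	 * maximum depth (max_aq_depth or max_queue_depth) is at least 128.
	 */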
88820618744SAnil Veerabhadrappa 	if (cmd->sqsize == 0) {
88920618744SAnil Veerabhadrappa 		SPDK_ERRLOG("Invalid SQSIZE = 0\n");
8901e714cfeSDaniel Verkamp 		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
8915323a026SDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
89203788f93SBen Walker 	}
89320618744SAnil Veerabhadrappa 
8949e3d841dSEvgeniy Kochetov 	if (cmd->qid == 0) {
89520618744SAnil Veerabhadrappa 		if (cmd->sqsize >= transport->opts.max_aq_depth) {
89620618744SAnil Veerabhadrappa 			SPDK_ERRLOG("Invalid SQSIZE for admin queue %u (min 1, max %u)\n",
89720618744SAnil Veerabhadrappa 				    cmd->sqsize, transport->opts.max_aq_depth - 1);
89820618744SAnil Veerabhadrappa 			SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
89920618744SAnil Veerabhadrappa 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
90020618744SAnil Veerabhadrappa 		}
90120618744SAnil Veerabhadrappa 	} else if (cmd->sqsize >= transport->opts.max_queue_depth) {
90220618744SAnil Veerabhadrappa 		SPDK_ERRLOG("Invalid SQSIZE %u (min 1, max %u)\n",
90320618744SAnil Veerabhadrappa 			    cmd->sqsize, transport->opts.max_queue_depth - 1);
90420618744SAnil Veerabhadrappa 		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
90520618744SAnil Veerabhadrappa 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
90620618744SAnil Veerabhadrappa 	}
90720618744SAnil Veerabhadrappa 
9081d304bc5SBen Walker 	qpair->sq_head_max = cmd->sqsize;
9091d304bc5SBen Walker 	qpair->qid = cmd->qid;
910f3e197ffSJim Harris 	qpair->connect_received = true;
91103788f93SBen Walker 
912bb926e80SJim Harris 	pthread_mutex_lock(&qpair->group->mutex);
913*c2471e45SAlexey Marchuk 	assert(qpair->group->current_unassociated_qpairs > 0);
914bb926e80SJim Harris 	qpair->group->current_unassociated_qpairs--;
915bb926e80SJim Harris 	pthread_mutex_unlock(&qpair->group->mutex);
916bb926e80SJim Harris 
917da999b69SEvgeniy Kochetov 	if (0 == qpair->qid) {
918da999b69SEvgeniy Kochetov 		qpair->group->stat.admin_qpairs++;
919080118cdSRui Chang 		qpair->group->stat.current_admin_qpairs++;
920da999b69SEvgeniy Kochetov 	} else {
921da999b69SEvgeniy Kochetov 		qpair->group->stat.io_qpairs++;
922080118cdSRui Chang 		qpair->group->stat.current_io_qpairs++;
923da999b69SEvgeniy Kochetov 	}
924da999b69SEvgeniy Kochetov 
92503788f93SBen Walker 	if (cmd->qid == 0) {
9262172c432STomasz Zawadzki 		SPDK_DEBUGLOG(nvmf, "Connect Admin Queue for controller ID 0x%x\n", data->cntlid);
92703788f93SBen Walker 
9282a6c2c28SChangpeng Liu 		if (spdk_nvme_trtype_is_fabrics(transport->ops->type) && data->cntlid != 0xFFFF) {
92903788f93SBen Walker 			/* This NVMf target only supports dynamic mode. */
93003788f93SBen Walker 			SPDK_ERRLOG("The NVMf target only supports dynamic mode (CNTLID = 0x%x).\n", data->cntlid);
9311e714cfeSDaniel Verkamp 			SPDK_NVMF_INVALID_CONNECT_DATA(rsp, cntlid);
9325323a026SDaniel Verkamp 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
93303788f93SBen Walker 		}
93403788f93SBen Walker 
93503788f93SBen Walker 		/* Establish a new ctrlr */
936198fd2ceSSeth Howell 		ctrlr = nvmf_ctrlr_create(subsystem, req, cmd, data);
937baa936a1SBen Walker 		if (!ctrlr) {
938198fd2ceSSeth Howell 			SPDK_ERRLOG("nvmf_ctrlr_create() failed\n");
93903788f93SBen Walker 			rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
9405323a026SDaniel Verkamp 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
94103788f93SBen Walker 		} else {
9427346be69SZiye Yang 			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
94303788f93SBen Walker 		}
9447346be69SZiye Yang 	} else {
945198fd2ceSSeth Howell 		spdk_thread_send_msg(subsystem->thread, _nvmf_ctrlr_add_io_qpair, req);
94630ef8cacSZiye Yang 		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
94703788f93SBen Walker 	}
948114a91fdSZiye Yang }
94903788f93SBen Walker 
950652fe8a2SJacek Kalwas static struct spdk_nvmf_subsystem_poll_group *
951652fe8a2SJacek Kalwas nvmf_subsystem_pg_from_connect_cmd(struct spdk_nvmf_request *req)
952652fe8a2SJacek Kalwas {
953652fe8a2SJacek Kalwas 	struct spdk_nvmf_fabric_connect_data *data;
954652fe8a2SJacek Kalwas 	struct spdk_nvmf_subsystem *subsystem;
955652fe8a2SJacek Kalwas 	struct spdk_nvmf_tgt *tgt;
956652fe8a2SJacek Kalwas 
957652fe8a2SJacek Kalwas 	assert(nvmf_request_is_fabric_connect(req));
958652fe8a2SJacek Kalwas 	assert(req->qpair->ctrlr == NULL);
95949c0d28aSJohn Levon 	assert(req->iovcnt == 1);
960652fe8a2SJacek Kalwas 
96149c0d28aSJohn Levon 	data = req->iov[0].iov_base;
962652fe8a2SJacek Kalwas 	tgt = req->qpair->transport->tgt;
963652fe8a2SJacek Kalwas 
964652fe8a2SJacek Kalwas 	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, data->subnqn);
965652fe8a2SJacek Kalwas 	if (subsystem == NULL) {
966652fe8a2SJacek Kalwas 		return NULL;
967652fe8a2SJacek Kalwas 	}
968652fe8a2SJacek Kalwas 
969652fe8a2SJacek Kalwas 	return &req->qpair->group->sgroups[subsystem->id];
970652fe8a2SJacek Kalwas }
971652fe8a2SJacek Kalwas 
972fcca8ea6SJacek Kalwas int
973fcca8ea6SJacek Kalwas spdk_nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
974fcca8ea6SJacek Kalwas {
975652fe8a2SJacek Kalwas 	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
976652fe8a2SJacek Kalwas 	struct spdk_nvmf_subsystem_poll_group *sgroup;
97727223168SKonrad Sztyber 	struct spdk_nvmf_qpair *qpair = req->qpair;
978652fe8a2SJacek Kalwas 	enum spdk_nvmf_request_exec_status status;
979fcca8ea6SJacek Kalwas 
98049c0d28aSJohn Levon 	if (req->iovcnt > 1) {
98149c0d28aSJohn Levon 		SPDK_ERRLOG("Connect command invalid iovcnt: %d\n", req->iovcnt);
98249c0d28aSJohn Levon 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
98349c0d28aSJohn Levon 		status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
98449c0d28aSJohn Levon 		goto out;
98549c0d28aSJohn Levon 	}
98649c0d28aSJohn Levon 
987652fe8a2SJacek Kalwas 	sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
988652fe8a2SJacek Kalwas 	if (!sgroup) {
989652fe8a2SJacek Kalwas 		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn);
990652fe8a2SJacek Kalwas 		status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
991652fe8a2SJacek Kalwas 		goto out;
992652fe8a2SJacek Kalwas 	}
993652fe8a2SJacek Kalwas 
994312a9d60SBen Walker 	sgroup->mgmt_io_outstanding++;
99527223168SKonrad Sztyber 	TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
996fcca8ea6SJacek Kalwas 
997198fd2ceSSeth Howell 	status = _nvmf_ctrlr_connect(req);
998652fe8a2SJacek Kalwas 
999652fe8a2SJacek Kalwas out:
1000652fe8a2SJacek Kalwas 	if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
1001db96437eSShuhei Matsumoto 		_nvmf_request_complete(req);
1002652fe8a2SJacek Kalwas 	}
1003652fe8a2SJacek Kalwas 
1004652fe8a2SJacek Kalwas 	return status;
1005fcca8ea6SJacek Kalwas }
1006fcca8ea6SJacek Kalwas 
1007fcca8ea6SJacek Kalwas static int
1008198fd2ceSSeth Howell nvmf_ctrlr_cmd_connect(struct spdk_nvmf_request *req)
1009fcca8ea6SJacek Kalwas {
101049c0d28aSJohn Levon 	struct spdk_nvmf_fabric_connect_data *data = req->iov[0].iov_base;
1011fcca8ea6SJacek Kalwas 	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
1012fcca8ea6SJacek Kalwas 	struct spdk_nvmf_transport *transport = req->qpair->transport;
1013fcca8ea6SJacek Kalwas 	struct spdk_nvmf_subsystem *subsystem;
1014fcca8ea6SJacek Kalwas 
1015fcca8ea6SJacek Kalwas 	if (req->length < sizeof(struct spdk_nvmf_fabric_connect_data)) {
1016fcca8ea6SJacek Kalwas 		SPDK_ERRLOG("Connect command data length 0x%x too small\n", req->length);
1017fcca8ea6SJacek Kalwas 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
1018fcca8ea6SJacek Kalwas 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1019fcca8ea6SJacek Kalwas 	}
1020fcca8ea6SJacek Kalwas 
1021bae7cfb4SJohn Levon 	if (req->iovcnt > 1) {
1022bae7cfb4SJohn Levon 		SPDK_ERRLOG("Connect command invalid iovcnt: %d\n", req->iovcnt);
1023bae7cfb4SJohn Levon 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
1024bae7cfb4SJohn Levon 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1025bae7cfb4SJohn Levon 	}
1026bae7cfb4SJohn Levon 
1027fcca8ea6SJacek Kalwas 	subsystem = spdk_nvmf_tgt_find_subsystem(transport->tgt, data->subnqn);
1028fcca8ea6SJacek Kalwas 	if (!subsystem) {
1029fcca8ea6SJacek Kalwas 		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn);
1030fcca8ea6SJacek Kalwas 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1031fcca8ea6SJacek Kalwas 	}
1032fcca8ea6SJacek Kalwas 
1033fcca8ea6SJacek Kalwas 	if ((subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE) ||
1034fcca8ea6SJacek Kalwas 	    (subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSING) ||
1035fcca8ea6SJacek Kalwas 	    (subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED) ||
1036fcca8ea6SJacek Kalwas 	    (subsystem->state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING)) {
1037ac3a42b1SJim Harris 		struct spdk_nvmf_subsystem_poll_group *sgroup;
1038ac3a42b1SJim Harris 
103912c640c0SJim Harris 		/* Subsystem is not ready to handle a connect. Decrement
104012c640c0SJim Harris 		 * the mgmt_io_outstanding to avoid the subsystem waiting
104112c640c0SJim Harris 		 * for this command to complete before unpausing. Queued
104212c640c0SJim Harris 		 * requests get retried when subsystem resumes.
1043ac3a42b1SJim Harris 		 */
1044ac3a42b1SJim Harris 		sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
1045bd4aec1cSGangCao 		assert(sgroup != NULL);
1046ac3a42b1SJim Harris 		sgroup->mgmt_io_outstanding--;
104712c640c0SJim Harris 		TAILQ_REMOVE(&req->qpair->outstanding, req, link);
104812c640c0SJim Harris 		TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
1049ac3a42b1SJim Harris 		SPDK_DEBUGLOG(nvmf, "Subsystem '%s' is not ready for connect, retrying...\n", subsystem->subnqn);
1050ac3a42b1SJim Harris 		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
1051ac3a42b1SJim Harris 	}
1052ac3a42b1SJim Harris 
1053fcca8ea6SJacek Kalwas 	/* Ensure that hostnqn is null terminated */
1054fcca8ea6SJacek Kalwas 	if (!memchr(data->hostnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1)) {
1055fcca8ea6SJacek Kalwas 		SPDK_ERRLOG("Connect HOSTNQN is not null terminated\n");
1056fcca8ea6SJacek Kalwas 		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, hostnqn);
1057fcca8ea6SJacek Kalwas 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1058fcca8ea6SJacek Kalwas 	}
1059fcca8ea6SJacek Kalwas 
1060198fd2ceSSeth Howell 	if (!nvmf_qpair_access_allowed(req->qpair, subsystem, data->hostnqn)) {
1061fcca8ea6SJacek Kalwas 		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
1062fcca8ea6SJacek Kalwas 		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_HOST;
1063fcca8ea6SJacek Kalwas 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1064fcca8ea6SJacek Kalwas 	}
1065fcca8ea6SJacek Kalwas 
1066198fd2ceSSeth Howell 	return _nvmf_ctrlr_connect(req);
1067fcca8ea6SJacek Kalwas }
1068fcca8ea6SJacek Kalwas 
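/*
 * Association timeout poller. Once the association timeout expires after a
 * reset or shutdown, stop the timer and, unless the controller is already
 * being destructed, disconnect the admin qpair to drop the host association.
 */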
106971cd42e1SJacek Kalwas static int
107071cd42e1SJacek Kalwas nvmf_ctrlr_association_remove(void *ctx)
107171cd42e1SJacek Kalwas {
107271cd42e1SJacek Kalwas 	struct spdk_nvmf_ctrlr *ctrlr = ctx;
107371cd42e1SJacek Kalwas 	int rc;
107471cd42e1SJacek Kalwas 
107597385af1SAlexey Marchuk 	nvmf_ctrlr_stop_association_timer(ctrlr);
107697385af1SAlexey Marchuk 
107797385af1SAlexey Marchuk 	if (ctrlr->in_destruct) {
107897385af1SAlexey Marchuk 		return SPDK_POLLER_IDLE;
107997385af1SAlexey Marchuk 	}
10802172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Disconnecting host from subsystem %s due to association timeout.\n",
108171cd42e1SJacek Kalwas 		      ctrlr->subsys->subnqn);
108271cd42e1SJacek Kalwas 
1083478f6524SAlexey Marchuk 	if (ctrlr->admin_qpair) {
1084608b54a2SKonrad Sztyber 		rc = spdk_nvmf_qpair_disconnect(ctrlr->admin_qpair);
1085d478b20dSAlexey Marchuk 		if (rc < 0 && rc != -EINPROGRESS) {
108671cd42e1SJacek Kalwas 			SPDK_ERRLOG("Failed to disconnect admin ctrlr qpair\n");
108771cd42e1SJacek Kalwas 			assert(false);
108871cd42e1SJacek Kalwas 		}
1089478f6524SAlexey Marchuk 	}
109071cd42e1SJacek Kalwas 
10914ec7d1efSJohn Levon 	return SPDK_POLLER_BUSY;
109271cd42e1SJacek Kalwas }
109371cd42e1SJacek Kalwas 
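/*
 * Completion path for a CC-initiated reset or shutdown, run after the I/O
 * qpair disconnects have been dispatched. While more than the admin qpair is
 * still active it re-polls every 100 ms until cc_timeout_tsc is reached and
 * then marks the controller fatal (CSTS.CFS). Otherwise a shutdown sets
 * CSTS.SHST to complete, while a reset clears CC and CSTS; in both cases the
 * association timer is re-armed if an association timeout is configured.
 */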
1094534e2e7cSChangpeng Liu static int
1095534e2e7cSChangpeng Liu _nvmf_ctrlr_cc_reset_shn_done(void *ctx)
1096aedcec8aSJacek Kalwas {
1097534e2e7cSChangpeng Liu 	struct spdk_nvmf_ctrlr *ctrlr = ctx;
1098f63c0899SChangpeng Liu 	uint64_t now = spdk_get_ticks();
1099534e2e7cSChangpeng Liu 	uint32_t count;
1100aedcec8aSJacek Kalwas 
1101534e2e7cSChangpeng Liu 	if (ctrlr->cc_timer) {
1102534e2e7cSChangpeng Liu 		spdk_poller_unregister(&ctrlr->cc_timer);
1103534e2e7cSChangpeng Liu 	}
1104534e2e7cSChangpeng Liu 
1105534e2e7cSChangpeng Liu 	count = spdk_bit_array_count_set(ctrlr->qpair_mask);
1106534e2e7cSChangpeng Liu 	SPDK_DEBUGLOG(nvmf, "ctrlr %p active queue count %u\n", ctrlr, count);
1107534e2e7cSChangpeng Liu 
1108534e2e7cSChangpeng Liu 	if (count > 1) {
1109f63c0899SChangpeng Liu 		if (now < ctrlr->cc_timeout_tsc) {
1110f63c0899SChangpeng Liu 			/* restart cc timer */
1111534e2e7cSChangpeng Liu 			ctrlr->cc_timer = SPDK_POLLER_REGISTER(_nvmf_ctrlr_cc_reset_shn_done, ctrlr, 100 * 1000);
1112534e2e7cSChangpeng Liu 			return SPDK_POLLER_IDLE;
1113f63c0899SChangpeng Liu 		} else {
1114f63c0899SChangpeng Liu 			/* controller fatal status */
1115f63c0899SChangpeng Liu 			SPDK_WARNLOG("IO timeout, ctrlr %p is in fatal status\n", ctrlr);
1116a19e5b4dSChangpeng Liu 			nvmf_ctrlr_set_fatal_status(ctrlr);
1117aedcec8aSJacek Kalwas 		}
1118f63c0899SChangpeng Liu 	}
1119f63c0899SChangpeng Liu 
1120f63c0899SChangpeng Liu 	spdk_poller_unregister(&ctrlr->cc_timeout_timer);
1121aedcec8aSJacek Kalwas 
112219aa3040SChangpeng Liu 	if (ctrlr->disconnect_is_shn) {
1123aedcec8aSJacek Kalwas 		ctrlr->vcprop.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
112419aa3040SChangpeng Liu 		ctrlr->disconnect_is_shn = false;
112519aa3040SChangpeng Liu 	} else {
1126ae86baebSSeth Howell 		/* Only a subset of the registers are cleared out on a reset */
1127ae86baebSSeth Howell 		ctrlr->vcprop.cc.raw = 0;
1128ae86baebSSeth Howell 		ctrlr->vcprop.csts.raw = 0;
112919aa3040SChangpeng Liu 	}
1130ae86baebSSeth Howell 
113147ce1fe3SJacek Kalwas 	/* After CC.EN transitions to 0 (due to shutdown or reset), the association
113247ce1fe3SJacek Kalwas 	 * between the host and controller shall be preserved for at least 2 minutes */
113344090079SMichael Haeuptle 	if (ctrlr->association_timer) {
113444090079SMichael Haeuptle 		SPDK_DEBUGLOG(nvmf, "Association timer already set\n");
113544090079SMichael Haeuptle 		nvmf_ctrlr_stop_association_timer(ctrlr);
113644090079SMichael Haeuptle 	}
11374fa3d991SChangpeng Liu 	if (ctrlr->association_timeout) {
113847ce1fe3SJacek Kalwas 		ctrlr->association_timer = SPDK_POLLER_REGISTER(nvmf_ctrlr_association_remove, ctrlr,
11393f2d21a0SJim Harris 					   ctrlr->association_timeout * 1000);
11404fa3d991SChangpeng Liu 	}
11418689f6a3SJim Harris 	ctrlr->disconnect_in_progress = false;
1142534e2e7cSChangpeng Liu 	return SPDK_POLLER_BUSY;
1143534e2e7cSChangpeng Liu }
1144534e2e7cSChangpeng Liu 
1145534e2e7cSChangpeng Liu static void
1146534e2e7cSChangpeng Liu nvmf_ctrlr_cc_reset_shn_done(struct spdk_io_channel_iter *i, int status)
1147534e2e7cSChangpeng Liu {
1148534e2e7cSChangpeng Liu 	struct spdk_nvmf_ctrlr *ctrlr = spdk_io_channel_iter_get_ctx(i);
1149534e2e7cSChangpeng Liu 
1150534e2e7cSChangpeng Liu 	if (status < 0) {
1151534e2e7cSChangpeng Liu 		SPDK_ERRLOG("Failed to disconnect io ctrlr qpairs\n");
1152534e2e7cSChangpeng Liu 		assert(false);
1153534e2e7cSChangpeng Liu 	}
1154534e2e7cSChangpeng Liu 
1155534e2e7cSChangpeng Liu 	_nvmf_ctrlr_cc_reset_shn_done((void *)ctrlr);
1156ae86baebSSeth Howell }
1157ae86baebSSeth Howell 
1158f63c0899SChangpeng Liu static void
1159f63c0899SChangpeng Liu nvmf_bdev_complete_reset(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1160f63c0899SChangpeng Liu {
1161f63c0899SChangpeng Liu 	SPDK_NOTICELOG("Bdev reset completed with %s\n", success ? "success" : "failure");
1162f63c0899SChangpeng Liu 
1163f63c0899SChangpeng Liu 	spdk_bdev_free_io(bdev_io);
1164f63c0899SChangpeng Liu }
1165f63c0899SChangpeng Liu 
1166f63c0899SChangpeng Liu 
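/*
 * Timeout poller armed when the host clears CC.EN or requests a shutdown via
 * CC.SHN. If the I/O qpairs have not all been disconnected by the time it
 * fires, issue spdk_bdev_reset() on every bdev-backed namespace to abort
 * outstanding I/O.
 */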
1167f63c0899SChangpeng Liu static int
1168f63c0899SChangpeng Liu nvmf_ctrlr_cc_timeout(void *ctx)
1169f63c0899SChangpeng Liu {
1170f63c0899SChangpeng Liu 	struct spdk_nvmf_ctrlr *ctrlr = ctx;
1171cc2f6634SZiv Hirsch 	struct spdk_nvmf_poll_group *group;
1172f63c0899SChangpeng Liu 	struct spdk_nvmf_ns *ns;
1173f63c0899SChangpeng Liu 	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
1174f63c0899SChangpeng Liu 
1175f63c0899SChangpeng Liu 	spdk_poller_unregister(&ctrlr->cc_timeout_timer);
1176f63c0899SChangpeng Liu 	SPDK_DEBUGLOG(nvmf, "Ctrlr %p reset or shutdown timeout\n", ctrlr);
1177f63c0899SChangpeng Liu 
1178cc2f6634SZiv Hirsch 	if (!ctrlr->admin_qpair) {
1179cc2f6634SZiv Hirsch 		SPDK_NOTICELOG("Ctrlr %p admin qpair disconnected\n", ctrlr);
1180cc2f6634SZiv Hirsch 		return SPDK_POLLER_IDLE;
1181cc2f6634SZiv Hirsch 	}
1182cc2f6634SZiv Hirsch 
1183cc2f6634SZiv Hirsch 	group = ctrlr->admin_qpair->group;
1184cc2f6634SZiv Hirsch 	assert(group != NULL && group->sgroups != NULL);
1185cc2f6634SZiv Hirsch 
1186f63c0899SChangpeng Liu 	for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns != NULL;
1187f63c0899SChangpeng Liu 	     ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) {
1188f63c0899SChangpeng Liu 		if (ns->bdev == NULL) {
1189f63c0899SChangpeng Liu 			continue;
1190f63c0899SChangpeng Liu 		}
1191f63c0899SChangpeng Liu 		ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[ns->opts.nsid - 1];
1192f63c0899SChangpeng Liu 		SPDK_NOTICELOG("Ctrlr %p resetting NSID %u\n", ctrlr, ns->opts.nsid);
1193f63c0899SChangpeng Liu 		spdk_bdev_reset(ns->desc, ns_info->channel, nvmf_bdev_complete_reset, NULL);
1194f63c0899SChangpeng Liu 	}
1195f63c0899SChangpeng Liu 
1196f63c0899SChangpeng Liu 	return SPDK_POLLER_BUSY;
1197f63c0899SChangpeng Liu }
1198f63c0899SChangpeng Liu 
119968c2244cSBen Walker const struct spdk_nvmf_registers *
120068c2244cSBen Walker spdk_nvmf_ctrlr_get_regs(struct spdk_nvmf_ctrlr *ctrlr)
120168c2244cSBen Walker {
120268c2244cSBen Walker 	return &ctrlr->vcprop;
120368c2244cSBen Walker }
120468c2244cSBen Walker 
1205a19e5b4dSChangpeng Liu void
1206a19e5b4dSChangpeng Liu nvmf_ctrlr_set_fatal_status(struct spdk_nvmf_ctrlr *ctrlr)
1207a19e5b4dSChangpeng Liu {
1208a19e5b4dSChangpeng Liu 	ctrlr->vcprop.csts.bits.cfs = 1;
1209a19e5b4dSChangpeng Liu }
1210a19e5b4dSChangpeng Liu 
121103788f93SBen Walker static uint64_t
121203788f93SBen Walker nvmf_prop_get_cap(struct spdk_nvmf_ctrlr *ctrlr)
121303788f93SBen Walker {
121403788f93SBen Walker 	return ctrlr->vcprop.cap.raw;
121503788f93SBen Walker }
121603788f93SBen Walker 
121703788f93SBen Walker static uint64_t
121803788f93SBen Walker nvmf_prop_get_vs(struct spdk_nvmf_ctrlr *ctrlr)
121903788f93SBen Walker {
122003788f93SBen Walker 	return ctrlr->vcprop.vs.raw;
122103788f93SBen Walker }
122203788f93SBen Walker 
122303788f93SBen Walker static uint64_t
122403788f93SBen Walker nvmf_prop_get_cc(struct spdk_nvmf_ctrlr *ctrlr)
122503788f93SBen Walker {
122603788f93SBen Walker 	return ctrlr->vcprop.cc.raw;
122703788f93SBen Walker }
122803788f93SBen Walker 
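/*
 * Handle a Property Set of the CC register. Only the bits that differ from
 * the current value are acted on: enabling the controller stops any pending
 * association timer, while clearing CC.EN or requesting a shutdown arms the
 * cc timeout poller and disconnects all I/O qpairs before the transition is
 * finished in nvmf_ctrlr_cc_reset_shn_done(). IOSQES/IOCQES are recorded,
 * changes to AMS, MPS or an unsupported CSS are rejected, and toggled
 * reserved bits are logged but do not fail the command.
 */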
122903788f93SBen Walker static bool
1230e8c195b3SBen Walker nvmf_prop_set_cc(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
123103788f93SBen Walker {
123203788f93SBen Walker 	union spdk_nvme_cc_register cc, diff;
1233f63c0899SChangpeng Liu 	uint32_t cc_timeout_ms;
123403788f93SBen Walker 
1235e8c195b3SBen Walker 	cc.raw = value;
123603788f93SBen Walker 
12372172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "cur CC: 0x%08x\n", ctrlr->vcprop.cc.raw);
12382172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "new CC: 0x%08x\n", cc.raw);
123903788f93SBen Walker 
124003788f93SBen Walker 	/*
124103788f93SBen Walker 	 * Calculate which bits changed between the current and new CC.
124203788f93SBen Walker 	 * Mark each bit as 0 once it is handled to determine if any unhandled bits were changed.
124303788f93SBen Walker 	 */
124403788f93SBen Walker 	diff.raw = cc.raw ^ ctrlr->vcprop.cc.raw;
124503788f93SBen Walker 
124603788f93SBen Walker 	if (diff.bits.en) {
124703788f93SBen Walker 		if (cc.bits.en) {
12482172c432STomasz Zawadzki 			SPDK_DEBUGLOG(nvmf, "Property Set CC Enable!\n");
124971cd42e1SJacek Kalwas 			nvmf_ctrlr_stop_association_timer(ctrlr);
125071cd42e1SJacek Kalwas 
125103788f93SBen Walker 			ctrlr->vcprop.cc.bits.en = 1;
125203788f93SBen Walker 			ctrlr->vcprop.csts.bits.rdy = 1;
125303788f93SBen Walker 		} else {
12542172c432STomasz Zawadzki 			SPDK_DEBUGLOG(nvmf, "Property Set CC Disable!\n");
125519aa3040SChangpeng Liu 			if (ctrlr->disconnect_in_progress) {
125619aa3040SChangpeng Liu 				SPDK_DEBUGLOG(nvmf, "Disconnect in progress\n");
125719aa3040SChangpeng Liu 				return true;
125819aa3040SChangpeng Liu 			}
125919aa3040SChangpeng Liu 
1260f63c0899SChangpeng Liu 			ctrlr->cc_timeout_timer = SPDK_POLLER_REGISTER(nvmf_ctrlr_cc_timeout, ctrlr,
1261f63c0899SChangpeng Liu 						  NVMF_CC_RESET_SHN_TIMEOUT_IN_MS * 1000);
1262f63c0899SChangpeng Liu 			/* Make sure cc_timeout_ms falls between the cc_timeout_timer period and the host reset/shutdown timeout */
1263f63c0899SChangpeng Liu 			cc_timeout_ms = (NVMF_CC_RESET_SHN_TIMEOUT_IN_MS + NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS) / 2;
1264f63c0899SChangpeng Liu 			ctrlr->cc_timeout_tsc = spdk_get_ticks() + cc_timeout_ms * spdk_get_ticks_hz() / (uint64_t)1000;
1265f63c0899SChangpeng Liu 
1266ae86baebSSeth Howell 			ctrlr->vcprop.cc.bits.en = 0;
12678689f6a3SJim Harris 			ctrlr->disconnect_in_progress = true;
126819aa3040SChangpeng Liu 			ctrlr->disconnect_is_shn = false;
1269ae86baebSSeth Howell 			spdk_for_each_channel(ctrlr->subsys->tgt,
1270198fd2ceSSeth Howell 					      nvmf_ctrlr_disconnect_io_qpairs_on_pg,
1271ae86baebSSeth Howell 					      ctrlr,
127219aa3040SChangpeng Liu 					      nvmf_ctrlr_cc_reset_shn_done);
127303788f93SBen Walker 		}
127403788f93SBen Walker 		diff.bits.en = 0;
127503788f93SBen Walker 	}
127603788f93SBen Walker 
127703788f93SBen Walker 	if (diff.bits.shn) {
127803788f93SBen Walker 		if (cc.bits.shn == SPDK_NVME_SHN_NORMAL ||
127903788f93SBen Walker 		    cc.bits.shn == SPDK_NVME_SHN_ABRUPT) {
12802172c432STomasz Zawadzki 			SPDK_DEBUGLOG(nvmf, "Property Set CC Shutdown %u%ub!\n",
128103788f93SBen Walker 				      cc.bits.shn >> 1, cc.bits.shn & 1);
128219aa3040SChangpeng Liu 			if (ctrlr->disconnect_in_progress) {
128319aa3040SChangpeng Liu 				SPDK_DEBUGLOG(nvmf, "Disconnect in progress\n");
128419aa3040SChangpeng Liu 				return true;
128519aa3040SChangpeng Liu 			}
128619aa3040SChangpeng Liu 
1287f63c0899SChangpeng Liu 			ctrlr->cc_timeout_timer = SPDK_POLLER_REGISTER(nvmf_ctrlr_cc_timeout, ctrlr,
1288f63c0899SChangpeng Liu 						  NVMF_CC_RESET_SHN_TIMEOUT_IN_MS * 1000);
1289f63c0899SChangpeng Liu 			/* Make sure cc_timeout_ms falls between the cc_timeout_timer period and the host reset/shutdown timeout */
1290f63c0899SChangpeng Liu 			cc_timeout_ms = (NVMF_CC_RESET_SHN_TIMEOUT_IN_MS + NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS) / 2;
1291f63c0899SChangpeng Liu 			ctrlr->cc_timeout_tsc = spdk_get_ticks() + cc_timeout_ms * spdk_get_ticks_hz() / (uint64_t)1000;
1292f63c0899SChangpeng Liu 
129303788f93SBen Walker 			ctrlr->vcprop.cc.bits.shn = cc.bits.shn;
12948689f6a3SJim Harris 			ctrlr->disconnect_in_progress = true;
129519aa3040SChangpeng Liu 			ctrlr->disconnect_is_shn = true;
1296aedcec8aSJacek Kalwas 			spdk_for_each_channel(ctrlr->subsys->tgt,
1297aedcec8aSJacek Kalwas 					      nvmf_ctrlr_disconnect_io_qpairs_on_pg,
1298aedcec8aSJacek Kalwas 					      ctrlr,
129919aa3040SChangpeng Liu 					      nvmf_ctrlr_cc_reset_shn_done);
1300c322453cSJacek Kalwas 
1301c322453cSJacek Kalwas 			/* From the time a shutdown is initiated, the controller shall disable
1302c322453cSJacek Kalwas 			 * the Keep Alive timer */
1303c322453cSJacek Kalwas 			nvmf_ctrlr_stop_keep_alive_timer(ctrlr);
130403788f93SBen Walker 		} else if (cc.bits.shn == 0) {
130503788f93SBen Walker 			ctrlr->vcprop.cc.bits.shn = 0;
130603788f93SBen Walker 		} else {
130703788f93SBen Walker 			SPDK_ERRLOG("Prop Set CC: Invalid SHN value %u%ub\n",
130803788f93SBen Walker 				    cc.bits.shn >> 1, cc.bits.shn & 1);
130903788f93SBen Walker 			return false;
131003788f93SBen Walker 		}
131103788f93SBen Walker 		diff.bits.shn = 0;
131203788f93SBen Walker 	}
131303788f93SBen Walker 
131403788f93SBen Walker 	if (diff.bits.iosqes) {
13152172c432STomasz Zawadzki 		SPDK_DEBUGLOG(nvmf, "Prop Set IOSQES = %u (%u bytes)\n",
131603788f93SBen Walker 			      cc.bits.iosqes, 1u << cc.bits.iosqes);
131703788f93SBen Walker 		ctrlr->vcprop.cc.bits.iosqes = cc.bits.iosqes;
131803788f93SBen Walker 		diff.bits.iosqes = 0;
131903788f93SBen Walker 	}
132003788f93SBen Walker 
132103788f93SBen Walker 	if (diff.bits.iocqes) {
13222172c432STomasz Zawadzki 		SPDK_DEBUGLOG(nvmf, "Prop Set IOCQES = %u (%u bytes)\n",
132303788f93SBen Walker 			      cc.bits.iocqes, 1u << cc.bits.iocqes);
132403788f93SBen Walker 		ctrlr->vcprop.cc.bits.iocqes = cc.bits.iocqes;
132503788f93SBen Walker 		diff.bits.iocqes = 0;
132603788f93SBen Walker 	}
132703788f93SBen Walker 
1328a71e30deSJacek Kalwas 	if (diff.bits.ams) {
1329a71e30deSJacek Kalwas 		SPDK_ERRLOG("Arbitration Mechanism Selected (AMS) 0x%x not supported!\n", cc.bits.ams);
1330a71e30deSJacek Kalwas 		return false;
1331a71e30deSJacek Kalwas 	}
1332a71e30deSJacek Kalwas 
1333a71e30deSJacek Kalwas 	if (diff.bits.mps) {
1334a71e30deSJacek Kalwas 		SPDK_ERRLOG("Memory Page Size (MPS) %u KiB not supported!\n", (1 << (2 + cc.bits.mps)));
1335a71e30deSJacek Kalwas 		return false;
1336a71e30deSJacek Kalwas 	}
1337a71e30deSJacek Kalwas 
1338a71e30deSJacek Kalwas 	if (diff.bits.css) {
1339a36785dfSDennis Maisenbacher 		if (cc.bits.css > SPDK_NVME_CC_CSS_IOCS) {
1340a71e30deSJacek Kalwas 			SPDK_ERRLOG("I/O Command Set Selected (CSS) 0x%x not supported!\n", cc.bits.css);
1341a71e30deSJacek Kalwas 			return false;
1342a71e30deSJacek Kalwas 		}
1343a36785dfSDennis Maisenbacher 		diff.bits.css = 0;
1344a36785dfSDennis Maisenbacher 	}
1345a71e30deSJacek Kalwas 
134603788f93SBen Walker 	if (diff.raw != 0) {
13472ec88c4fSBen Walker 		/* Print an error message, but don't fail the command in this case.
13482ec88c4fSBen Walker 		 * If we did want to fail in this case, we'd need to ensure we acted
13492ec88c4fSBen Walker 		 * on no other bits or the initiator gets confused. */
135003788f93SBen Walker 		SPDK_ERRLOG("Prop Set CC toggled reserved bits 0x%x!\n", diff.raw);
135103788f93SBen Walker 	}
135203788f93SBen Walker 
135303788f93SBen Walker 	return true;
135403788f93SBen Walker }
135503788f93SBen Walker 
135603788f93SBen Walker static uint64_t
135703788f93SBen Walker nvmf_prop_get_csts(struct spdk_nvmf_ctrlr *ctrlr)
135803788f93SBen Walker {
135903788f93SBen Walker 	return ctrlr->vcprop.csts.raw;
136003788f93SBen Walker }
136103788f93SBen Walker 
136284479ab6SBen Walker static uint64_t
136384479ab6SBen Walker nvmf_prop_get_aqa(struct spdk_nvmf_ctrlr *ctrlr)
136484479ab6SBen Walker {
136584479ab6SBen Walker 	return ctrlr->vcprop.aqa.raw;
136684479ab6SBen Walker }
136784479ab6SBen Walker 
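/*
 * Validate and store AQA. ASQS and ACQS are 0's based queue sizes, so values
 * below SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES - 1, or any set reserved bits,
 * cause the property set to be rejected.
 */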
136884479ab6SBen Walker static bool
1369e8c195b3SBen Walker nvmf_prop_set_aqa(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
137084479ab6SBen Walker {
1371e8c195b3SBen Walker 	union spdk_nvme_aqa_register aqa;
1372e8c195b3SBen Walker 
1373e8c195b3SBen Walker 	aqa.raw = value;
1374e8c195b3SBen Walker 
137548408177SJohn Levon 	/*
137648408177SJohn Levon 	 * We don't need to explicitly check for maximum size, as the fields are
137748408177SJohn Levon 	 * limited to 12 bits (4096).
137848408177SJohn Levon 	 */
1379e5bf2a19SJacek Kalwas 	if (aqa.bits.asqs < SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES - 1 ||
138092f29271SJacek Kalwas 	    aqa.bits.acqs < SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES - 1 ||
138192f29271SJacek Kalwas 	    aqa.bits.reserved1 != 0 || aqa.bits.reserved2 != 0) {
1382e8c195b3SBen Walker 		return false;
1383e8c195b3SBen Walker 	}
1384e8c195b3SBen Walker 
1385e8c195b3SBen Walker 	ctrlr->vcprop.aqa.raw = value;
138684479ab6SBen Walker 
138784479ab6SBen Walker 	return true;
138884479ab6SBen Walker }
138984479ab6SBen Walker 
1390516afb9aSBen Walker static uint64_t
1391516afb9aSBen Walker nvmf_prop_get_asq(struct spdk_nvmf_ctrlr *ctrlr)
1392516afb9aSBen Walker {
1393516afb9aSBen Walker 	return ctrlr->vcprop.asq;
1394516afb9aSBen Walker }
1395516afb9aSBen Walker 
1396516afb9aSBen Walker static bool
1397e8c195b3SBen Walker nvmf_prop_set_asq_lower(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
1398516afb9aSBen Walker {
1399e8c195b3SBen Walker 	ctrlr->vcprop.asq = (ctrlr->vcprop.asq & (0xFFFFFFFFULL << 32ULL)) | value;
1400e8c195b3SBen Walker 
1401e8c195b3SBen Walker 	return true;
1402e8c195b3SBen Walker }
1403e8c195b3SBen Walker 
1404e8c195b3SBen Walker static bool
1405e8c195b3SBen Walker nvmf_prop_set_asq_upper(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
1406e8c195b3SBen Walker {
1407e8c195b3SBen Walker 	ctrlr->vcprop.asq = (ctrlr->vcprop.asq & 0xFFFFFFFFULL) | ((uint64_t)value << 32ULL);
1408516afb9aSBen Walker 
1409516afb9aSBen Walker 	return true;
1410516afb9aSBen Walker }
1411516afb9aSBen Walker 
1412516afb9aSBen Walker static uint64_t
1413516afb9aSBen Walker nvmf_prop_get_acq(struct spdk_nvmf_ctrlr *ctrlr)
1414516afb9aSBen Walker {
1415516afb9aSBen Walker 	return ctrlr->vcprop.acq;
1416516afb9aSBen Walker }
1417516afb9aSBen Walker 
1418516afb9aSBen Walker static bool
1419e8c195b3SBen Walker nvmf_prop_set_acq_lower(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
1420516afb9aSBen Walker {
1421e8c195b3SBen Walker 	ctrlr->vcprop.acq = (ctrlr->vcprop.acq & (0xFFFFFFFFULL << 32ULL)) | value;
1422e8c195b3SBen Walker 
1423e8c195b3SBen Walker 	return true;
1424e8c195b3SBen Walker }
1425e8c195b3SBen Walker 
1426e8c195b3SBen Walker static bool
1427e8c195b3SBen Walker nvmf_prop_set_acq_upper(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
1428e8c195b3SBen Walker {
1429e8c195b3SBen Walker 	ctrlr->vcprop.acq = (ctrlr->vcprop.acq & 0xFFFFFFFFULL) | ((uint64_t)value << 32ULL);
1430516afb9aSBen Walker 
1431516afb9aSBen Walker 	return true;
1432516afb9aSBen Walker }
1433516afb9aSBen Walker 
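/*
 * Table-driven implementation of the Fabrics Property Get/Set commands. Each
 * entry records a register offset and size, a get callback, and optional set
 * callbacks for the lower and upper 32 bits, so 8-byte properties can be
 * accessed either as a whole or one 4-byte half at a time.
 */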
143403788f93SBen Walker struct nvmf_prop {
143503788f93SBen Walker 	uint32_t ofst;
143603788f93SBen Walker 	uint8_t size;
143703788f93SBen Walker 	char name[11];
143803788f93SBen Walker 	uint64_t (*get_cb)(struct spdk_nvmf_ctrlr *ctrlr);
1439e8c195b3SBen Walker 	bool (*set_cb)(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value);
1440e8c195b3SBen Walker 	bool (*set_upper_cb)(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value);
144103788f93SBen Walker };
144203788f93SBen Walker 
1443e8c195b3SBen Walker #define PROP(field, size, get_cb, set_cb, set_upper_cb) \
144403788f93SBen Walker 	{ \
144503788f93SBen Walker 		offsetof(struct spdk_nvme_registers, field), \
1446e8c195b3SBen Walker 		size, \
144703788f93SBen Walker 		#field, \
1448e8c195b3SBen Walker 		get_cb, set_cb, set_upper_cb \
144903788f93SBen Walker 	}
145003788f93SBen Walker 
145103788f93SBen Walker static const struct nvmf_prop nvmf_props[] = {
1452e8c195b3SBen Walker 	PROP(cap,  8, nvmf_prop_get_cap,  NULL,                    NULL),
1453e8c195b3SBen Walker 	PROP(vs,   4, nvmf_prop_get_vs,   NULL,                    NULL),
1454e8c195b3SBen Walker 	PROP(cc,   4, nvmf_prop_get_cc,   nvmf_prop_set_cc,        NULL),
1455e8c195b3SBen Walker 	PROP(csts, 4, nvmf_prop_get_csts, NULL,                    NULL),
1456e8c195b3SBen Walker 	PROP(aqa,  4, nvmf_prop_get_aqa,  nvmf_prop_set_aqa,       NULL),
1457e8c195b3SBen Walker 	PROP(asq,  8, nvmf_prop_get_asq,  nvmf_prop_set_asq_lower, nvmf_prop_set_asq_upper),
1458e8c195b3SBen Walker 	PROP(acq,  8, nvmf_prop_get_acq,  nvmf_prop_set_acq_lower, nvmf_prop_set_acq_upper),
145903788f93SBen Walker };
146003788f93SBen Walker 
146103788f93SBen Walker static const struct nvmf_prop *
1462e8c195b3SBen Walker find_prop(uint32_t ofst, uint8_t size)
146303788f93SBen Walker {
146403788f93SBen Walker 	size_t i;
146503788f93SBen Walker 
146603788f93SBen Walker 	for (i = 0; i < SPDK_COUNTOF(nvmf_props); i++) {
146703788f93SBen Walker 		const struct nvmf_prop *prop = &nvmf_props[i];
146803788f93SBen Walker 
1469e8c195b3SBen Walker 		if ((ofst >= prop->ofst) && (ofst + size <= prop->ofst + prop->size)) {
147003788f93SBen Walker 			return prop;
147103788f93SBen Walker 		}
147203788f93SBen Walker 	}
147303788f93SBen Walker 
147403788f93SBen Walker 	return NULL;
147503788f93SBen Walker }
147603788f93SBen Walker 
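/*
 * Handle a Fabrics Property Get: validate the access size (4 or 8 bytes),
 * look up a property covering the requested offset, and return its value.
 * A 4-byte read of an 8-byte property returns only the addressed half.
 */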
14775323a026SDaniel Verkamp static int
1478198fd2ceSSeth Howell nvmf_property_get(struct spdk_nvmf_request *req)
147903788f93SBen Walker {
14805323a026SDaniel Verkamp 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
14815323a026SDaniel Verkamp 	struct spdk_nvmf_fabric_prop_get_cmd *cmd = &req->cmd->prop_get_cmd;
14825323a026SDaniel Verkamp 	struct spdk_nvmf_fabric_prop_get_rsp *response = &req->rsp->prop_get_rsp;
148303788f93SBen Walker 	const struct nvmf_prop *prop;
1484e8c195b3SBen Walker 	uint8_t size;
148503788f93SBen Walker 
148603788f93SBen Walker 	response->status.sc = 0;
148703788f93SBen Walker 	response->value.u64 = 0;
148803788f93SBen Walker 
14892172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "size %d, offset 0x%x\n",
149003788f93SBen Walker 		      cmd->attrib.size, cmd->ofst);
149103788f93SBen Walker 
1492e8c195b3SBen Walker 	switch (cmd->attrib.size) {
1493e8c195b3SBen Walker 	case SPDK_NVMF_PROP_SIZE_4:
1494e8c195b3SBen Walker 		size = 4;
1495e8c195b3SBen Walker 		break;
1496e8c195b3SBen Walker 	case SPDK_NVMF_PROP_SIZE_8:
1497e8c195b3SBen Walker 		size = 8;
1498e8c195b3SBen Walker 		break;
1499e8c195b3SBen Walker 	default:
15009cb78e1aSJim Harris 		SPDK_DEBUGLOG(nvmf, "Invalid size value %d\n", cmd->attrib.size);
1501f519fbbcSDaniel Verkamp 		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
150203788f93SBen Walker 		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
15035323a026SDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
150403788f93SBen Walker 	}
150503788f93SBen Walker 
1506e8c195b3SBen Walker 	prop = find_prop(cmd->ofst, size);
150703788f93SBen Walker 	if (prop == NULL || prop->get_cb == NULL) {
1508f519fbbcSDaniel Verkamp 		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
150903788f93SBen Walker 		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
15105323a026SDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
151103788f93SBen Walker 	}
151203788f93SBen Walker 
15132172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "name: %s\n", prop->name);
1514e8c195b3SBen Walker 
151503788f93SBen Walker 	response->value.u64 = prop->get_cb(ctrlr);
1516e8c195b3SBen Walker 
1517e8c195b3SBen Walker 	if (size != prop->size) {
1518e8c195b3SBen Walker 		/* The size must be 4 and the prop->size is 8. Figure out which part of the property to read. */
1519e8c195b3SBen Walker 		assert(size == 4);
1520e8c195b3SBen Walker 		assert(prop->size == 8);
1521e8c195b3SBen Walker 
1522e8c195b3SBen Walker 		if (cmd->ofst == prop->ofst) {
1523e8c195b3SBen Walker 			/* Keep bottom 4 bytes only */
1524e8c195b3SBen Walker 			response->value.u64 &= 0xFFFFFFFF;
1525e8c195b3SBen Walker 		} else {
1526e8c195b3SBen Walker 			/* Keep top 4 bytes only */
1527e8c195b3SBen Walker 			response->value.u64 >>= 32;
1528e8c195b3SBen Walker 		}
1529e8c195b3SBen Walker 	}
1530e8c195b3SBen Walker 
153110c7d133SJim Harris 	SPDK_DEBUGLOG(nvmf, "response value: 0x%" PRIx64 "\n", response->value.u64);
153210c7d133SJim Harris 
15335323a026SDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
153403788f93SBen Walker }
153503788f93SBen Walker 
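/*
 * Handle a Fabrics Property Set by dispatching to the property's set
 * callbacks. For example, a 4-byte write to the upper half of ASQ goes
 * through nvmf_prop_set_asq_upper(), while an 8-byte write invokes the
 * lower-half and then the upper-half callback in turn.
 */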
15365323a026SDaniel Verkamp static int
1537198fd2ceSSeth Howell nvmf_property_set(struct spdk_nvmf_request *req)
153803788f93SBen Walker {
15395323a026SDaniel Verkamp 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
15405323a026SDaniel Verkamp 	struct spdk_nvmf_fabric_prop_set_cmd *cmd = &req->cmd->prop_set_cmd;
15415323a026SDaniel Verkamp 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
154203788f93SBen Walker 	const struct nvmf_prop *prop;
154303788f93SBen Walker 	uint64_t value;
1544e8c195b3SBen Walker 	uint8_t size;
1545e8c195b3SBen Walker 	bool ret;
154603788f93SBen Walker 
15472172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "size %d, offset 0x%x, value 0x%" PRIx64 "\n",
154803788f93SBen Walker 		      cmd->attrib.size, cmd->ofst, cmd->value.u64);
154903788f93SBen Walker 
1550e8c195b3SBen Walker 	switch (cmd->attrib.size) {
1551e8c195b3SBen Walker 	case SPDK_NVMF_PROP_SIZE_4:
1552e8c195b3SBen Walker 		size = 4;
1553e8c195b3SBen Walker 		break;
1554e8c195b3SBen Walker 	case SPDK_NVMF_PROP_SIZE_8:
1555e8c195b3SBen Walker 		size = 8;
1556e8c195b3SBen Walker 		break;
1557e8c195b3SBen Walker 	default:
15589cb78e1aSJim Harris 		SPDK_DEBUGLOG(nvmf, "Invalid size value %d\n", cmd->attrib.size);
1559e8c195b3SBen Walker 		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
1560e8c195b3SBen Walker 		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
1561e8c195b3SBen Walker 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1562e8c195b3SBen Walker 	}
1563e8c195b3SBen Walker 
1564e8c195b3SBen Walker 	prop = find_prop(cmd->ofst, size);
156503788f93SBen Walker 	if (prop == NULL || prop->set_cb == NULL) {
1566814cd258SChangpeng Liu 		SPDK_INFOLOG(nvmf, "Invalid offset 0x%x\n", cmd->ofst);
1567f519fbbcSDaniel Verkamp 		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
156803788f93SBen Walker 		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
15695323a026SDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
157003788f93SBen Walker 	}
157103788f93SBen Walker 
15722172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "name: %s\n", prop->name);
157303788f93SBen Walker 
157403788f93SBen Walker 	value = cmd->value.u64;
1575e8c195b3SBen Walker 
1576e8c195b3SBen Walker 	if (prop->size == 4) {
1577e8c195b3SBen Walker 		ret = prop->set_cb(ctrlr, (uint32_t)value);
1578e8c195b3SBen Walker 	} else if (size != prop->size) {
1579e8c195b3SBen Walker 		/* The size must be 4 and the prop->size is 8. Figure out which part of the property to write. */
1580e8c195b3SBen Walker 		assert(size == 4);
1581e8c195b3SBen Walker 		assert(prop->size == 8);
1582e8c195b3SBen Walker 
1583e8c195b3SBen Walker 		if (cmd->ofst == prop->ofst) {
1584e8c195b3SBen Walker 			ret = prop->set_cb(ctrlr, (uint32_t)value);
1585e8c195b3SBen Walker 		} else {
1586e8c195b3SBen Walker 			ret = prop->set_upper_cb(ctrlr, (uint32_t)value);
1587e8c195b3SBen Walker 		}
1588e8c195b3SBen Walker 	} else {
1589e8c195b3SBen Walker 		ret = prop->set_cb(ctrlr, (uint32_t)value);
1590e8c195b3SBen Walker 		if (ret) {
1591e8c195b3SBen Walker 			ret = prop->set_upper_cb(ctrlr, (uint32_t)(value >> 32));
1592e8c195b3SBen Walker 		}
159303788f93SBen Walker 	}
159403788f93SBen Walker 
1595e8c195b3SBen Walker 	if (!ret) {
159603788f93SBen Walker 		SPDK_ERRLOG("prop set_cb failed\n");
1597f519fbbcSDaniel Verkamp 		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
159803788f93SBen Walker 		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
15995323a026SDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
160003788f93SBen Walker 	}
16015323a026SDaniel Verkamp 
16025323a026SDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
160303788f93SBen Walker }
160403788f93SBen Walker 
16055b4b66baSDaniel Verkamp static int
1606198fd2ceSSeth Howell nvmf_ctrlr_set_features_arbitration(struct spdk_nvmf_request *req)
1607d2e7daa4SDaniel Verkamp {
1608d2e7daa4SDaniel Verkamp 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1609d2e7daa4SDaniel Verkamp 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1610d2e7daa4SDaniel Verkamp 
16112172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Set Features - Arbitration (cdw11 = 0x%0x)\n", cmd->cdw11);
1612d2e7daa4SDaniel Verkamp 
1613d2e7daa4SDaniel Verkamp 	ctrlr->feat.arbitration.raw = cmd->cdw11;
1614d2e7daa4SDaniel Verkamp 	ctrlr->feat.arbitration.bits.reserved = 0;
1615d2e7daa4SDaniel Verkamp 
1616d2e7daa4SDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1617d2e7daa4SDaniel Verkamp }
1618d2e7daa4SDaniel Verkamp 
1619d2e7daa4SDaniel Verkamp static int
1620198fd2ceSSeth Howell nvmf_ctrlr_set_features_power_management(struct spdk_nvmf_request *req)
1621d2e7daa4SDaniel Verkamp {
1622d2e7daa4SDaniel Verkamp 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1623d2e7daa4SDaniel Verkamp 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1624d2e7daa4SDaniel Verkamp 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1625d2e7daa4SDaniel Verkamp 
16262172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Set Features - Power Management (cdw11 = 0x%0x)\n", cmd->cdw11);
1627d2e7daa4SDaniel Verkamp 
1628d2e7daa4SDaniel Verkamp 	/* Only PS = 0 is allowed, since we report NPSS = 0 */
16290c9057f0SChangpeng Liu 	if (cmd->cdw11_bits.feat_power_management.bits.ps != 0) {
16300c9057f0SChangpeng Liu 		SPDK_ERRLOG("Invalid power state %u\n", cmd->cdw11_bits.feat_power_management.bits.ps);
1631d2e7daa4SDaniel Verkamp 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
1632d2e7daa4SDaniel Verkamp 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
1633d2e7daa4SDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1634d2e7daa4SDaniel Verkamp 	}
1635d2e7daa4SDaniel Verkamp 
1636d2e7daa4SDaniel Verkamp 	ctrlr->feat.power_management.raw = cmd->cdw11;
1637d2e7daa4SDaniel Verkamp 	ctrlr->feat.power_management.bits.reserved = 0;
1638d2e7daa4SDaniel Verkamp 
1639d2e7daa4SDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1640d2e7daa4SDaniel Verkamp }
1641d2e7daa4SDaniel Verkamp 
1642d2e7daa4SDaniel Verkamp static bool
1643d2e7daa4SDaniel Verkamp temp_threshold_opts_valid(const union spdk_nvme_feat_temperature_threshold *opts)
1644d2e7daa4SDaniel Verkamp {
1645d2e7daa4SDaniel Verkamp 	/*
1646d2e7daa4SDaniel Verkamp 	 * Valid TMPSEL values:
1647d2e7daa4SDaniel Verkamp 	 *  0000b - 1000b: temperature sensors
1648d2e7daa4SDaniel Verkamp 	 *  1111b: set all implemented temperature sensors
1649d2e7daa4SDaniel Verkamp 	 */
1650d2e7daa4SDaniel Verkamp 	if (opts->bits.tmpsel >= 9 && opts->bits.tmpsel != 15) {
1651d2e7daa4SDaniel Verkamp 		/* 1001b - 1110b: reserved */
1652d2e7daa4SDaniel Verkamp 		SPDK_ERRLOG("Invalid TMPSEL %u\n", opts->bits.tmpsel);
1653d2e7daa4SDaniel Verkamp 		return false;
1654d2e7daa4SDaniel Verkamp 	}
1655d2e7daa4SDaniel Verkamp 
1656d2e7daa4SDaniel Verkamp 	/*
1657d2e7daa4SDaniel Verkamp 	 * Valid THSEL values:
1658d2e7daa4SDaniel Verkamp 	 *  00b: over temperature threshold
1659d2e7daa4SDaniel Verkamp 	 *  01b: under temperature threshold
1660d2e7daa4SDaniel Verkamp 	 */
1661d2e7daa4SDaniel Verkamp 	if (opts->bits.thsel > 1) {
1662d2e7daa4SDaniel Verkamp 		/* 10b - 11b: reserved */
1663d2e7daa4SDaniel Verkamp 		SPDK_ERRLOG("Invalid THSEL %u\n", opts->bits.thsel);
1664d2e7daa4SDaniel Verkamp 		return false;
1665d2e7daa4SDaniel Verkamp 	}
1666d2e7daa4SDaniel Verkamp 
1667d2e7daa4SDaniel Verkamp 	return true;
1668d2e7daa4SDaniel Verkamp }
1669d2e7daa4SDaniel Verkamp 
1670d2e7daa4SDaniel Verkamp static int
1671198fd2ceSSeth Howell nvmf_ctrlr_set_features_temperature_threshold(struct spdk_nvmf_request *req)
1672d2e7daa4SDaniel Verkamp {
1673d2e7daa4SDaniel Verkamp 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1674d2e7daa4SDaniel Verkamp 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1675d2e7daa4SDaniel Verkamp 
16762172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Set Features - Temperature Threshold (cdw11 = 0x%0x)\n", cmd->cdw11);
1677d2e7daa4SDaniel Verkamp 
16780c9057f0SChangpeng Liu 	if (!temp_threshold_opts_valid(&cmd->cdw11_bits.feat_temp_threshold)) {
1679d2e7daa4SDaniel Verkamp 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
1680d2e7daa4SDaniel Verkamp 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
1681d2e7daa4SDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1682d2e7daa4SDaniel Verkamp 	}
1683d2e7daa4SDaniel Verkamp 
1684d2e7daa4SDaniel Verkamp 	/* TODO: no sensors implemented - ignore new values */
1685d2e7daa4SDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1686d2e7daa4SDaniel Verkamp }
1687d2e7daa4SDaniel Verkamp 
1688d2e7daa4SDaniel Verkamp static int
1689198fd2ceSSeth Howell nvmf_ctrlr_get_features_temperature_threshold(struct spdk_nvmf_request *req)
1690d2e7daa4SDaniel Verkamp {
1691d2e7daa4SDaniel Verkamp 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1692d2e7daa4SDaniel Verkamp 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1693d2e7daa4SDaniel Verkamp 
16942172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Get Features - Temperature Threshold (cdw11 = 0x%0x)\n", cmd->cdw11);
1695d2e7daa4SDaniel Verkamp 
16960c9057f0SChangpeng Liu 	if (!temp_threshold_opts_valid(&cmd->cdw11_bits.feat_temp_threshold)) {
1697d2e7daa4SDaniel Verkamp 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
1698d2e7daa4SDaniel Verkamp 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
1699d2e7daa4SDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1700d2e7daa4SDaniel Verkamp 	}
1701d2e7daa4SDaniel Verkamp 
1702d2e7daa4SDaniel Verkamp 	/* TODO: no sensors implemented - return 0 for all thresholds */
1703d2e7daa4SDaniel Verkamp 	rsp->cdw0 = 0;
1704d2e7daa4SDaniel Verkamp 
1705d2e7daa4SDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1706d2e7daa4SDaniel Verkamp }
1707d2e7daa4SDaniel Verkamp 
1708d2e7daa4SDaniel Verkamp static int
17093eed8456SChangpeng Liu nvmf_ctrlr_get_features_interrupt_vector_configuration(struct spdk_nvmf_request *req)
17103eed8456SChangpeng Liu {
17113eed8456SChangpeng Liu 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
17123eed8456SChangpeng Liu 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
17133eed8456SChangpeng Liu 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
17143eed8456SChangpeng Liu 	union spdk_nvme_feat_interrupt_vector_configuration iv_conf = {};
17153eed8456SChangpeng Liu 
17163eed8456SChangpeng Liu 	SPDK_DEBUGLOG(nvmf, "Get Features - Interrupt Vector Configuration (cdw11 = 0x%0x)\n", cmd->cdw11);
17173eed8456SChangpeng Liu 
17183eed8456SChangpeng Liu 	iv_conf.bits.iv = cmd->cdw11_bits.feat_interrupt_vector_configuration.bits.iv;
17193eed8456SChangpeng Liu 	iv_conf.bits.cd = ctrlr->feat.interrupt_vector_configuration.bits.cd;
17203eed8456SChangpeng Liu 	rsp->cdw0 = iv_conf.raw;
17213eed8456SChangpeng Liu 
17223eed8456SChangpeng Liu 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
17233eed8456SChangpeng Liu }
17243eed8456SChangpeng Liu 
17253eed8456SChangpeng Liu static int
1726198fd2ceSSeth Howell nvmf_ctrlr_set_features_error_recovery(struct spdk_nvmf_request *req)
1727d2e7daa4SDaniel Verkamp {
1728d2e7daa4SDaniel Verkamp 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1729d2e7daa4SDaniel Verkamp 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1730d2e7daa4SDaniel Verkamp 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1731d2e7daa4SDaniel Verkamp 
17322172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Set Features - Error Recovery (cdw11 = 0x%0x)\n", cmd->cdw11);
1733d2e7daa4SDaniel Verkamp 
17340c9057f0SChangpeng Liu 	if (cmd->cdw11_bits.feat_error_recovery.bits.dulbe) {
1735d2e7daa4SDaniel Verkamp 		/*
1736d2e7daa4SDaniel Verkamp 		 * Host is not allowed to set this bit, since we don't advertise it in
1737d2e7daa4SDaniel Verkamp 		 * Identify Namespace.
1738d2e7daa4SDaniel Verkamp 		 */
1739d2e7daa4SDaniel Verkamp 		SPDK_ERRLOG("Host set unsupported DULBE bit\n");
1740d2e7daa4SDaniel Verkamp 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
1741d2e7daa4SDaniel Verkamp 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
1742d2e7daa4SDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1743d2e7daa4SDaniel Verkamp 	}
1744d2e7daa4SDaniel Verkamp 
1745d2e7daa4SDaniel Verkamp 	ctrlr->feat.error_recovery.raw = cmd->cdw11;
1746d2e7daa4SDaniel Verkamp 	ctrlr->feat.error_recovery.bits.reserved = 0;
1747d2e7daa4SDaniel Verkamp 
1748d2e7daa4SDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1749d2e7daa4SDaniel Verkamp }
1750d2e7daa4SDaniel Verkamp 
1751d2e7daa4SDaniel Verkamp static int
1752198fd2ceSSeth Howell nvmf_ctrlr_set_features_volatile_write_cache(struct spdk_nvmf_request *req)
1753d2e7daa4SDaniel Verkamp {
1754d2e7daa4SDaniel Verkamp 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1755d2e7daa4SDaniel Verkamp 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1756d2e7daa4SDaniel Verkamp 
17572172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Set Features - Volatile Write Cache (cdw11 = 0x%0x)\n", cmd->cdw11);
1758d2e7daa4SDaniel Verkamp 
1759d2e7daa4SDaniel Verkamp 	ctrlr->feat.volatile_write_cache.raw = cmd->cdw11;
1760d2e7daa4SDaniel Verkamp 	ctrlr->feat.volatile_write_cache.bits.reserved = 0;
1761d2e7daa4SDaniel Verkamp 
17622172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Set Features - Volatile Write Cache %s\n",
1763decb5957Syidong0635 		      ctrlr->feat.volatile_write_cache.bits.wce ? "Enabled" : "Disabled");
1764d2e7daa4SDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1765d2e7daa4SDaniel Verkamp }
1766d2e7daa4SDaniel Verkamp 
1767d2e7daa4SDaniel Verkamp static int
1768198fd2ceSSeth Howell nvmf_ctrlr_set_features_write_atomicity(struct spdk_nvmf_request *req)
1769d2e7daa4SDaniel Verkamp {
1770d2e7daa4SDaniel Verkamp 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1771d2e7daa4SDaniel Verkamp 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1772d2e7daa4SDaniel Verkamp 
17732172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Set Features - Write Atomicity (cdw11 = 0x%0x)\n", cmd->cdw11);
1774d2e7daa4SDaniel Verkamp 
1775d2e7daa4SDaniel Verkamp 	ctrlr->feat.write_atomicity.raw = cmd->cdw11;
1776d2e7daa4SDaniel Verkamp 	ctrlr->feat.write_atomicity.bits.reserved = 0;
1777d2e7daa4SDaniel Verkamp 
1778d2e7daa4SDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1779d2e7daa4SDaniel Verkamp }
1780d2e7daa4SDaniel Verkamp 
1781d2e7daa4SDaniel Verkamp static int
1782198fd2ceSSeth Howell nvmf_ctrlr_set_features_host_identifier(struct spdk_nvmf_request *req)
178303788f93SBen Walker {
178403788f93SBen Walker 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
178503788f93SBen Walker 
178603788f93SBen Walker 	SPDK_ERRLOG("Set Features - Host Identifier not allowed\n");
178703788f93SBen Walker 	response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
178803788f93SBen Walker 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
178903788f93SBen Walker }
179003788f93SBen Walker 
17915b4b66baSDaniel Verkamp static int
1792198fd2ceSSeth Howell nvmf_ctrlr_get_features_host_identifier(struct spdk_nvmf_request *req)
179303788f93SBen Walker {
17941d304bc5SBen Walker 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
179503788f93SBen Walker 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
179603788f93SBen Walker 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
1797ecc80dfcSJohn Levon 	struct spdk_iov_xfer ix;
179803788f93SBen Walker 
17992172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Get Features - Host Identifier\n");
18000f561837SDaniel Verkamp 
18010c9057f0SChangpeng Liu 	if (!cmd->cdw11_bits.feat_host_identifier.bits.exhid) {
180203788f93SBen Walker 		/* NVMe over Fabrics requires EXHID=1 (128-bit/16-byte host ID) */
180303788f93SBen Walker 		SPDK_ERRLOG("Get Features - Host Identifier with EXHID=0 not allowed\n");
180403788f93SBen Walker 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
180503788f93SBen Walker 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
180603788f93SBen Walker 	}
180703788f93SBen Walker 
1808ecc80dfcSJohn Levon 	if (req->iovcnt < 1 || req->length < sizeof(ctrlr->hostid)) {
180903788f93SBen Walker 		SPDK_ERRLOG("Invalid data buffer for Get Features - Host Identifier\n");
181003788f93SBen Walker 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
181103788f93SBen Walker 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
181203788f93SBen Walker 	}
181303788f93SBen Walker 
1814ecc80dfcSJohn Levon 	spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
1815ecc80dfcSJohn Levon 	spdk_iov_xfer_from_buf(&ix, &ctrlr->hostid, sizeof(ctrlr->hostid));
1816ad521730SJohn Levon 
181703788f93SBen Walker 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
181803788f93SBen Walker }
181903788f93SBen Walker 
18205b4b66baSDaniel Verkamp static int
1821198fd2ceSSeth Howell nvmf_ctrlr_get_features_reservation_notification_mask(struct spdk_nvmf_request *req)
1822d5b89466SChangpeng Liu {
1823d5b89466SChangpeng Liu 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1824d5b89466SChangpeng Liu 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1825d5b89466SChangpeng Liu 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1826d5b89466SChangpeng Liu 	struct spdk_nvmf_ns *ns;
1827d5b89466SChangpeng Liu 
1828cc6920a4SJosh Soref 	SPDK_DEBUGLOG(nvmf, "Get Features - Reservation Notification Mask\n");
1829d5b89466SChangpeng Liu 
183098145aa6SShuhei Matsumoto 	if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) {
1831d5b89466SChangpeng Liu 		SPDK_ERRLOG("Get Features - Invalid Namespace ID\n");
1832d5b89466SChangpeng Liu 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
1833d5b89466SChangpeng Liu 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1834d5b89466SChangpeng Liu 	}
1835d5b89466SChangpeng Liu 
183605859ef1SJim Harris 	ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid);
1837d5b89466SChangpeng Liu 	if (ns == NULL) {
183836ed5f1fSJim Harris 		SPDK_ERRLOG("Get Features - Invalid Namespace ID\n");
1839d5b89466SChangpeng Liu 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
1840d5b89466SChangpeng Liu 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1841d5b89466SChangpeng Liu 	}
1842d5b89466SChangpeng Liu 	rsp->cdw0 = ns->mask;
1843d5b89466SChangpeng Liu 
1844d5b89466SChangpeng Liu 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1845d5b89466SChangpeng Liu }
1846d5b89466SChangpeng Liu 
1847d5b89466SChangpeng Liu static int
1848198fd2ceSSeth Howell nvmf_ctrlr_set_features_reservation_notification_mask(struct spdk_nvmf_request *req)
1849d5b89466SChangpeng Liu {
1850d5b89466SChangpeng Liu 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1851d5b89466SChangpeng Liu 	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
1852d5b89466SChangpeng Liu 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1853d5b89466SChangpeng Liu 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1854d5b89466SChangpeng Liu 	struct spdk_nvmf_ns *ns;
1855d5b89466SChangpeng Liu 
1856cc6920a4SJosh Soref 	SPDK_DEBUGLOG(nvmf, "Set Features - Reservation Notification Mask\n");
1857d5b89466SChangpeng Liu 
185898145aa6SShuhei Matsumoto 	if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) {
1859d5b89466SChangpeng Liu 		for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
1860d5b89466SChangpeng Liu 		     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
1861d5b89466SChangpeng Liu 			ns->mask = cmd->cdw11;
1862d5b89466SChangpeng Liu 		}
1863d5b89466SChangpeng Liu 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1864d5b89466SChangpeng Liu 	}
1865d5b89466SChangpeng Liu 
186605859ef1SJim Harris 	ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid);
1867d5b89466SChangpeng Liu 	if (ns == NULL) {
1868d5b89466SChangpeng Liu 		SPDK_ERRLOG("Set Features - Invalid Namespace ID\n");
1869d5b89466SChangpeng Liu 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
1870d5b89466SChangpeng Liu 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1871d5b89466SChangpeng Liu 	}
1872d5b89466SChangpeng Liu 	ns->mask = cmd->cdw11;
1873d5b89466SChangpeng Liu 
1874d5b89466SChangpeng Liu 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1875d5b89466SChangpeng Liu }
1876d5b89466SChangpeng Liu 
1877d5b89466SChangpeng Liu static int
1878198fd2ceSSeth Howell nvmf_ctrlr_get_features_reservation_persistence(struct spdk_nvmf_request *req)
1879da30cda9SChangpeng Liu {
1880da30cda9SChangpeng Liu 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1881da30cda9SChangpeng Liu 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1882da30cda9SChangpeng Liu 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
1883da30cda9SChangpeng Liu 	struct spdk_nvmf_ns *ns;
1884da30cda9SChangpeng Liu 
18852172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Get Features - Reservation Persistence\n");
1886da30cda9SChangpeng Liu 
188705859ef1SJim Harris 	ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid);
188898145aa6SShuhei Matsumoto 	/* NSID with SPDK_NVME_GLOBAL_NS_TAG (=0xffffffff) also included */
1889da30cda9SChangpeng Liu 	if (ns == NULL) {
1890da30cda9SChangpeng Liu 		SPDK_ERRLOG("Get Features - Invalid Namespace ID\n");
1891da30cda9SChangpeng Liu 		response->status.sct = SPDK_NVME_SCT_GENERIC;
1892da30cda9SChangpeng Liu 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
1893da30cda9SChangpeng Liu 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1894da30cda9SChangpeng Liu 	}
1895da30cda9SChangpeng Liu 
1896cf5c4a8aSChangpeng Liu 	response->cdw0 = ns->ptpl_activated;
1897da30cda9SChangpeng Liu 
1898da30cda9SChangpeng Liu 	response->status.sct = SPDK_NVME_SCT_GENERIC;
1899da30cda9SChangpeng Liu 	response->status.sc = SPDK_NVME_SC_SUCCESS;
1900da30cda9SChangpeng Liu 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1901da30cda9SChangpeng Liu }
1902da30cda9SChangpeng Liu 
1903da30cda9SChangpeng Liu static int
1904198fd2ceSSeth Howell nvmf_ctrlr_set_features_reservation_persistence(struct spdk_nvmf_request *req)
1905da30cda9SChangpeng Liu {
1906da30cda9SChangpeng Liu 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1907da30cda9SChangpeng Liu 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1908da30cda9SChangpeng Liu 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
1909da30cda9SChangpeng Liu 	struct spdk_nvmf_ns *ns;
1910cf5c4a8aSChangpeng Liu 	bool ptpl;
1911da30cda9SChangpeng Liu 
19122172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Set Features - Reservation Persistence\n");
1913da30cda9SChangpeng Liu 
191405859ef1SJim Harris 	ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid);
19150c9057f0SChangpeng Liu 	ptpl = cmd->cdw11_bits.feat_rsv_persistence.bits.ptpl;
1916cf5c4a8aSChangpeng Liu 
1917c6a78e83SArtur Paszkiewicz 	if (cmd->nsid != SPDK_NVME_GLOBAL_NS_TAG && ns && nvmf_ns_is_ptpl_capable(ns)) {
1918cf5c4a8aSChangpeng Liu 		ns->ptpl_activated = ptpl;
191998145aa6SShuhei Matsumoto 	} else if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) {
1920c623bf39SArtur Paszkiewicz 		for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns;
1921cf5c4a8aSChangpeng Liu 		     ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) {
1922c6a78e83SArtur Paszkiewicz 			if (nvmf_ns_is_ptpl_capable(ns)) {
1923cf5c4a8aSChangpeng Liu 				ns->ptpl_activated = ptpl;
1924cf5c4a8aSChangpeng Liu 			}
1925c623bf39SArtur Paszkiewicz 		}
1926cf5c4a8aSChangpeng Liu 	} else {
1927cf5c4a8aSChangpeng Liu 		SPDK_ERRLOG("Set Features - Invalid Namespace ID or Reservation Configuration\n");
1928da30cda9SChangpeng Liu 		response->status.sct = SPDK_NVME_SCT_GENERIC;
1929da30cda9SChangpeng Liu 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
1930da30cda9SChangpeng Liu 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1931da30cda9SChangpeng Liu 	}
1932da30cda9SChangpeng Liu 
1933da30cda9SChangpeng Liu 	/* TODO: Feature not changeable for now */
1934da30cda9SChangpeng Liu 	response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
1935cf5c4a8aSChangpeng Liu 	response->status.sc = SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE;
1936da30cda9SChangpeng Liu 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1937da30cda9SChangpeng Liu }
1938da30cda9SChangpeng Liu 
1939da30cda9SChangpeng Liu static int
19408a1862c0SChangpeng Liu nvmf_ctrlr_get_features_host_behavior_support(struct spdk_nvmf_request *req)
19418a1862c0SChangpeng Liu {
19428a1862c0SChangpeng Liu 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
19438a1862c0SChangpeng Liu 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
19448a1862c0SChangpeng Liu 	struct spdk_nvme_host_behavior host_behavior = {};
1945ecc80dfcSJohn Levon 	struct spdk_iov_xfer ix;
19468a1862c0SChangpeng Liu 
19478a1862c0SChangpeng Liu 	SPDK_DEBUGLOG(nvmf, "Get Features - Host Behavior Support\n");
19488a1862c0SChangpeng Liu 
1949ecc80dfcSJohn Levon 	if (req->iovcnt < 1 || req->length < sizeof(struct spdk_nvme_host_behavior)) {
19508a1862c0SChangpeng Liu 		SPDK_ERRLOG("invalid data buffer for Host Behavior Support\n");
19518a1862c0SChangpeng Liu 		response->status.sct = SPDK_NVME_SCT_GENERIC;
19528a1862c0SChangpeng Liu 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
19538a1862c0SChangpeng Liu 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
19548a1862c0SChangpeng Liu 	}
19558a1862c0SChangpeng Liu 
19568a1862c0SChangpeng Liu 	host_behavior.acre = ctrlr->acre_enabled;
19571e2d5e50SShuhei Matsumoto 	host_behavior.lbafee = ctrlr->lbafee_enabled;
1958ad521730SJohn Levon 
1959ecc80dfcSJohn Levon 	spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
1960ecc80dfcSJohn Levon 	spdk_iov_xfer_from_buf(&ix, &host_behavior, sizeof(host_behavior));
19618a1862c0SChangpeng Liu 
19628a1862c0SChangpeng Liu 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
19638a1862c0SChangpeng Liu }
19648a1862c0SChangpeng Liu 
19658a1862c0SChangpeng Liu static int
19665cc56599Syupeng nvmf_ctrlr_set_features_host_behavior_support(struct spdk_nvmf_request *req)
19675cc56599Syupeng {
19685cc56599Syupeng 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
19695cc56599Syupeng 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
19705cc56599Syupeng 	struct spdk_nvme_host_behavior *host_behavior;
19715cc56599Syupeng 
19725cc56599Syupeng 	SPDK_DEBUGLOG(nvmf, "Set Features - Host Behavior Support\n");
19735cc56599Syupeng 	if (req->iovcnt != 1) {
19745cc56599Syupeng 		SPDK_ERRLOG("Host Behavior Support invalid iovcnt: %d\n", req->iovcnt);
19755cc56599Syupeng 		response->status.sct = SPDK_NVME_SCT_GENERIC;
19765cc56599Syupeng 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
19775cc56599Syupeng 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
19785cc56599Syupeng 	}
19795cc56599Syupeng 	if (req->iov[0].iov_len != sizeof(struct spdk_nvme_host_behavior)) {
1980f9734138SNick Connolly 		SPDK_ERRLOG("Host Behavior Support invalid iov_len: %zd\n", req->iov[0].iov_len);
19815cc56599Syupeng 		response->status.sct = SPDK_NVME_SCT_GENERIC;
19825cc56599Syupeng 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
19835cc56599Syupeng 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
19845cc56599Syupeng 	}
19855cc56599Syupeng 
19865cc56599Syupeng 	host_behavior = (struct spdk_nvme_host_behavior *)req->iov[0].iov_base;
19875cc56599Syupeng 	if (host_behavior->acre == 0) {
19885cc56599Syupeng 		ctrlr->acre_enabled = false;
19895cc56599Syupeng 	} else if (host_behavior->acre == 1) {
19905cc56599Syupeng 		ctrlr->acre_enabled = true;
19915cc56599Syupeng 	} else {
19925cc56599Syupeng 		SPDK_ERRLOG("Host Behavior Support invalid acre: 0x%02x\n", host_behavior->acre);
19935cc56599Syupeng 		response->status.sct = SPDK_NVME_SCT_GENERIC;
19945cc56599Syupeng 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
19955cc56599Syupeng 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
19965cc56599Syupeng 	}
19971e2d5e50SShuhei Matsumoto 	if (host_behavior->lbafee == 0) {
19981e2d5e50SShuhei Matsumoto 		ctrlr->lbafee_enabled = false;
19991e2d5e50SShuhei Matsumoto 	} else if (host_behavior->lbafee == 1) {
20001e2d5e50SShuhei Matsumoto 		ctrlr->lbafee_enabled = true;
20011e2d5e50SShuhei Matsumoto 	} else {
2002481e17c3SShuhei Matsumoto 		SPDK_ERRLOG("Host Behavior Support invalid lbafee: 0x%02x\n", host_behavior->lbafee);
20031e2d5e50SShuhei Matsumoto 		response->status.sct = SPDK_NVME_SCT_GENERIC;
20041e2d5e50SShuhei Matsumoto 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
20051e2d5e50SShuhei Matsumoto 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
20061e2d5e50SShuhei Matsumoto 	}
20075cc56599Syupeng 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
20085cc56599Syupeng }
20095cc56599Syupeng 
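/*
 * Set Features - Keep Alive Timer (KATO). A KATO of 0 (disable) is rejected
 * with Keep Alive Invalid, values below MIN_KEEP_ALIVE_TIMEOUT_IN_MS are
 * raised to that minimum, and anything else is rounded up to a multiple of
 * the KAS granularity; the keep alive poller is then re-armed with the
 * effective value.
 */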
20105cc56599Syupeng static int
2011198fd2ceSSeth Howell nvmf_ctrlr_set_features_keep_alive_timer(struct spdk_nvmf_request *req)
201203788f93SBen Walker {
20131d304bc5SBen Walker 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
201403788f93SBen Walker 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
201503788f93SBen Walker 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
201603788f93SBen Walker 
20172172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Set Features - Keep Alive Timer (%u ms)\n", cmd->cdw11);
201803788f93SBen Walker 
2019b8769cdbSJinYu 	/*
2020b8769cdbSJinYu 	 * If the host attempts to disable keep alive by setting KATO to 0h,
2021b8769cdbSJinYu 	 * a status value of Keep Alive Invalid shall be returned.
2022b8769cdbSJinYu 	 */
20230c9057f0SChangpeng Liu 	if (cmd->cdw11_bits.feat_keep_alive_timer.bits.kato == 0) {
202403788f93SBen Walker 		rsp->status.sc = SPDK_NVME_SC_KEEP_ALIVE_INVALID;
20250c9057f0SChangpeng Liu 	} else if (cmd->cdw11_bits.feat_keep_alive_timer.bits.kato < MIN_KEEP_ALIVE_TIMEOUT_IN_MS) {
2026b8769cdbSJinYu 		ctrlr->feat.keep_alive_timer.bits.kato = MIN_KEEP_ALIVE_TIMEOUT_IN_MS;
202703788f93SBen Walker 	} else {
2028b8769cdbSJinYu 		/* round up to the nearest multiple of the KAS granularity */
20290c9057f0SChangpeng Liu 		ctrlr->feat.keep_alive_timer.bits.kato = spdk_divide_round_up(
20300c9057f0SChangpeng Liu 					cmd->cdw11_bits.feat_keep_alive_timer.bits.kato,
2031b8769cdbSJinYu 					KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS) *
2032b8769cdbSJinYu 				KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS;
2033b8769cdbSJinYu 	}
2034b8769cdbSJinYu 
2035b8769cdbSJinYu 	/*
2036b8769cdbSJinYu 	 * If the keep alive timeout value was changed successfully,
2037b8769cdbSJinYu 	 * update the keep alive poller.
2038b8769cdbSJinYu 	 */
20390c9057f0SChangpeng Liu 	if (cmd->cdw11_bits.feat_keep_alive_timer.bits.kato != 0) {
2040b8769cdbSJinYu 		if (ctrlr->keep_alive_poller != NULL) {
2041b8769cdbSJinYu 			spdk_poller_unregister(&ctrlr->keep_alive_poller);
2042b8769cdbSJinYu 		}
2043198fd2ceSSeth Howell 		ctrlr->keep_alive_poller = SPDK_POLLER_REGISTER(nvmf_ctrlr_keep_alive_poll, ctrlr,
2044b8769cdbSJinYu 					   ctrlr->feat.keep_alive_timer.bits.kato * 1000);
204503788f93SBen Walker 	}
204603788f93SBen Walker 
20472172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Set Features - Keep Alive Timer set to %u ms\n",
20480f561837SDaniel Verkamp 		      ctrlr->feat.keep_alive_timer.bits.kato);
204903788f93SBen Walker 
205003788f93SBen Walker 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
205103788f93SBen Walker }
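
/*
 * Worked example of the rounding above (values taken from the KAS macros in
 * this file): the rounding granularity is KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS
 * = 100 * 100 ms = 10000 ms.  A requested KATO of 15000 ms therefore becomes
 * spdk_divide_round_up(15000, 10000) * 10000 = 20000 ms, a nonzero request below
 * MIN_KEEP_ALIVE_TIMEOUT_IN_MS is clamped up to 10000 ms, and KATO = 0 fails with
 * Keep Alive Invalid.
 */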
205203788f93SBen Walker 
20535b4b66baSDaniel Verkamp static int
2054198fd2ceSSeth Howell nvmf_ctrlr_set_features_number_of_queues(struct spdk_nvmf_request *req)
205503788f93SBen Walker {
20561d304bc5SBen Walker 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
2057f6c2ad90SChangpeng Liu 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
205803788f93SBen Walker 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
205965aa57b0SBen Walker 	uint32_t count;
206003788f93SBen Walker 
20612172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Set Features - Number of Queues, cdw11 0x%x\n",
206203788f93SBen Walker 		      req->cmd->nvme_cmd.cdw11);
206303788f93SBen Walker 
2064f6c2ad90SChangpeng Liu 	if (cmd->cdw11_bits.feat_num_of_queues.bits.ncqr == UINT16_MAX ||
2065f6c2ad90SChangpeng Liu 	    cmd->cdw11_bits.feat_num_of_queues.bits.nsqr == UINT16_MAX) {
2066f6c2ad90SChangpeng Liu 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
2067f6c2ad90SChangpeng Liu 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
2068f6c2ad90SChangpeng Liu 	}
2069f6c2ad90SChangpeng Liu 
207065aa57b0SBen Walker 	count = spdk_bit_array_count_set(ctrlr->qpair_mask);
20716fa48bbfSChen Wang 	/* verify that no I/O queue pairs are active before changing the number of queues */
207265aa57b0SBen Walker 	if (count > 1) {
20732172c432STomasz Zawadzki 		SPDK_DEBUGLOG(nvmf, "Queue pairs already active!\n");
207403788f93SBen Walker 		rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
207503788f93SBen Walker 	} else {
20760f561837SDaniel Verkamp 		/*
20770f561837SDaniel Verkamp 		 * Ignore the value requested by the host -
20780f561837SDaniel Verkamp 		 * always return the pre-configured value based on max_qpairs_allowed.
20790f561837SDaniel Verkamp 		 */
20800f561837SDaniel Verkamp 		rsp->cdw0 = ctrlr->feat.number_of_queues.raw;
208103788f93SBen Walker 	}
208203788f93SBen Walker 
208303788f93SBen Walker 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
208403788f93SBen Walker }
208503788f93SBen Walker 
2086d37555b4SJonas Pfefferle SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_ctrlr) == 4928,
2087982c25feSChangpeng Liu 		   "Please check migration fields that need to be added or not");
2088982c25feSChangpeng Liu 
2089982c25feSChangpeng Liu static void
2090982c25feSChangpeng Liu nvmf_ctrlr_migr_data_copy(struct spdk_nvmf_ctrlr_migr_data *data,
2091982c25feSChangpeng Liu 			  const struct spdk_nvmf_ctrlr_migr_data *data_src, size_t data_size)
209213f7510fSChangpeng Liu {
2093982c25feSChangpeng Liu 	assert(data);
2094982c25feSChangpeng Liu 	assert(data_src);
2095982c25feSChangpeng Liu 	assert(data_size);
209613f7510fSChangpeng Liu 
2097982c25feSChangpeng Liu 	memcpy(&data->regs, &data_src->regs, spdk_min(data->regs_size, data_src->regs_size));
2098982c25feSChangpeng Liu 	memcpy(&data->feat, &data_src->feat, spdk_min(data->feat_size, data_src->feat_size));
209913f7510fSChangpeng Liu 
2100982c25feSChangpeng Liu #define SET_FIELD(field) \
2101982c25feSChangpeng Liu     if (offsetof(struct spdk_nvmf_ctrlr_migr_data, field) + sizeof(data->field) <= data_size) { \
2102982c25feSChangpeng Liu         data->field = data_src->field; \
2103982c25feSChangpeng Liu     } \
210413f7510fSChangpeng Liu 
2105982c25feSChangpeng Liu 	SET_FIELD(cntlid);
2106982c25feSChangpeng Liu 	SET_FIELD(acre);
2107982c25feSChangpeng Liu 	SET_FIELD(num_aer_cids);
2108982c25feSChangpeng Liu 	SET_FIELD(num_async_events);
2109982c25feSChangpeng Liu 	SET_FIELD(notice_aen_mask);
2110982c25feSChangpeng Liu #undef SET_FIELD
2111982c25feSChangpeng Liu 
2112982c25feSChangpeng Liu #define SET_ARRAY(arr) \
2113982c25feSChangpeng Liu     if (offsetof(struct spdk_nvmf_ctrlr_migr_data, arr) + sizeof(data->arr) <= data_size) { \
2114982c25feSChangpeng Liu         memcpy(&data->arr, &data_src->arr, sizeof(data->arr)); \
2115982c25feSChangpeng Liu     } \
2116982c25feSChangpeng Liu 
2117982c25feSChangpeng Liu 	SET_ARRAY(async_events);
2118982c25feSChangpeng Liu 	SET_ARRAY(aer_cids);
2119982c25feSChangpeng Liu #undef SET_ARRAY
212013f7510fSChangpeng Liu }
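
/*
 * The SET_FIELD/SET_ARRAY pattern above copies a member only when it lies
 * entirely within data_size (the smaller of the two peers' declared sizes), so a
 * peer built against an older, shorter layout simply skips fields it does not
 * know about instead of reading past its buffer.
 */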
212113f7510fSChangpeng Liu 
212295dd9003SChangpeng Liu int
2123982c25feSChangpeng Liu spdk_nvmf_ctrlr_save_migr_data(struct spdk_nvmf_ctrlr *ctrlr,
2124982c25feSChangpeng Liu 			       struct spdk_nvmf_ctrlr_migr_data *data)
212595dd9003SChangpeng Liu {
212695dd9003SChangpeng Liu 	struct spdk_nvmf_async_event_completion *event, *event_tmp;
2127982c25feSChangpeng Liu 	uint32_t i;
2128982c25feSChangpeng Liu 	struct spdk_nvmf_ctrlr_migr_data data_local = {
2129982c25feSChangpeng Liu 		.data_size = offsetof(struct spdk_nvmf_ctrlr_migr_data, unused),
2130982c25feSChangpeng Liu 		.regs_size = sizeof(struct spdk_nvmf_registers),
2131982c25feSChangpeng Liu 		.feat_size = sizeof(struct spdk_nvmf_ctrlr_feat)
2132982c25feSChangpeng Liu 	};
213395dd9003SChangpeng Liu 
2134982c25feSChangpeng Liu 	assert(data->data_size <= sizeof(data_local));
2135982c25feSChangpeng Liu 	assert(spdk_get_thread() == ctrlr->thread);
2136982c25feSChangpeng Liu 
2137982c25feSChangpeng Liu 	memcpy(&data_local.regs, &ctrlr->vcprop, sizeof(struct spdk_nvmf_registers));
2138982c25feSChangpeng Liu 	memcpy(&data_local.feat, &ctrlr->feat, sizeof(struct spdk_nvmf_ctrlr_feat));
2139982c25feSChangpeng Liu 
2140982c25feSChangpeng Liu 	data_local.cntlid = ctrlr->cntlid;
2141982c25feSChangpeng Liu 	data_local.acre = ctrlr->acre_enabled;
2142982c25feSChangpeng Liu 	data_local.num_aer_cids = ctrlr->nr_aer_reqs;
214395dd9003SChangpeng Liu 
214495dd9003SChangpeng Liu 	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
2145b6bb252eSLiu Xiaodong 		if (data_local.num_async_events + 1 > SPDK_NVMF_MIGR_MAX_PENDING_AERS) {
2146982c25feSChangpeng Liu 			SPDK_ERRLOG("ctrlr %p has too many pending AERs\n", ctrlr);
214795dd9003SChangpeng Liu 			break;
214895dd9003SChangpeng Liu 		}
2149b6bb252eSLiu Xiaodong 
2150b6bb252eSLiu Xiaodong 		data_local.async_events[data_local.num_async_events++].raw = event->event.raw;
215195dd9003SChangpeng Liu 	}
215295dd9003SChangpeng Liu 
2153982c25feSChangpeng Liu 	for (i = 0; i < ctrlr->nr_aer_reqs; i++) {
2154982c25feSChangpeng Liu 		struct spdk_nvmf_request *req = ctrlr->aer_req[i];
2155982c25feSChangpeng Liu 		data_local.aer_cids[i] = req->cmd->nvme_cmd.cid;
2156982c25feSChangpeng Liu 	}
2157982c25feSChangpeng Liu 	data_local.notice_aen_mask = ctrlr->notice_aen_mask;
2158982c25feSChangpeng Liu 
2159982c25feSChangpeng Liu 	nvmf_ctrlr_migr_data_copy(data, &data_local, spdk_min(data->data_size, data_local.data_size));
216095dd9003SChangpeng Liu 	return 0;
216195dd9003SChangpeng Liu }
216295dd9003SChangpeng Liu 
216395dd9003SChangpeng Liu int
2164982c25feSChangpeng Liu spdk_nvmf_ctrlr_restore_migr_data(struct spdk_nvmf_ctrlr *ctrlr,
2165982c25feSChangpeng Liu 				  const struct spdk_nvmf_ctrlr_migr_data *data)
216695dd9003SChangpeng Liu {
216795dd9003SChangpeng Liu 	uint32_t i;
2168982c25feSChangpeng Liu 	struct spdk_nvmf_ctrlr_migr_data data_local = {
2169982c25feSChangpeng Liu 		.data_size = offsetof(struct spdk_nvmf_ctrlr_migr_data, unused),
2170982c25feSChangpeng Liu 		.regs_size = sizeof(struct spdk_nvmf_registers),
2171982c25feSChangpeng Liu 		.feat_size = sizeof(struct spdk_nvmf_ctrlr_feat)
2172982c25feSChangpeng Liu 	};
217395dd9003SChangpeng Liu 
2174982c25feSChangpeng Liu 	assert(data->data_size <= sizeof(data_local));
2175982c25feSChangpeng Liu 	assert(spdk_get_thread() == ctrlr->thread);
217695dd9003SChangpeng Liu 
2177982c25feSChangpeng Liu 	/* local version of data should have defaults set before copy */
2178982c25feSChangpeng Liu 	nvmf_ctrlr_migr_data_copy(&data_local, data, spdk_min(data->data_size, data_local.data_size));
2179982c25feSChangpeng Liu 	memcpy(&ctrlr->vcprop, &data_local.regs, sizeof(struct spdk_nvmf_registers));
2180982c25feSChangpeng Liu 	memcpy(&ctrlr->feat, &data_local.feat, sizeof(struct spdk_nvmf_ctrlr_feat));
2181982c25feSChangpeng Liu 
2182982c25feSChangpeng Liu 	ctrlr->cntlid = data_local.cntlid;
2183982c25feSChangpeng Liu 	ctrlr->acre_enabled = data_local.acre;
2184982c25feSChangpeng Liu 
2185982c25feSChangpeng Liu 	for (i = 0; i < data_local.num_async_events; i++) {
2186982c25feSChangpeng Liu 		struct spdk_nvmf_async_event_completion *event;
2187982c25feSChangpeng Liu 
2188982c25feSChangpeng Liu 		event = calloc(1, sizeof(*event));
218995dd9003SChangpeng Liu 		if (!event) {
219095dd9003SChangpeng Liu 			return -ENOMEM;
219195dd9003SChangpeng Liu 		}
2192982c25feSChangpeng Liu 
2193982c25feSChangpeng Liu 		event->event.raw = data_local.async_events[i].raw;
219495dd9003SChangpeng Liu 		STAILQ_INSERT_TAIL(&ctrlr->async_events, event, link);
219595dd9003SChangpeng Liu 	}
2196982c25feSChangpeng Liu 	ctrlr->notice_aen_mask = data_local.notice_aen_mask;
219795dd9003SChangpeng Liu 
219895dd9003SChangpeng Liu 	return 0;
219995dd9003SChangpeng Liu }
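
/*
 * Minimal usage sketch of the save/restore pair above (src_ctrlr, dst_ctrlr and
 * the surrounding migration flow are hypothetical; only the struct setup mirrors
 * what these functions expect).  Both calls must run on the owning controller's
 * SPDK thread, as asserted above.
 *
 *	struct spdk_nvmf_ctrlr_migr_data blob = {
 *		.data_size = offsetof(struct spdk_nvmf_ctrlr_migr_data, unused),
 *		.regs_size = sizeof(struct spdk_nvmf_registers),
 *		.feat_size = sizeof(struct spdk_nvmf_ctrlr_feat),
 *	};
 *
 *	spdk_nvmf_ctrlr_save_migr_data(src_ctrlr, &blob);     // on src_ctrlr->thread
 *	// ... transfer blob to the destination ...
 *	spdk_nvmf_ctrlr_restore_migr_data(dst_ctrlr, &blob);  // on dst_ctrlr->thread
 */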
220095dd9003SChangpeng Liu 
22015b4b66baSDaniel Verkamp static int
2202198fd2ceSSeth Howell nvmf_ctrlr_set_features_async_event_configuration(struct spdk_nvmf_request *req)
220303788f93SBen Walker {
22041d304bc5SBen Walker 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
220503788f93SBen Walker 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
220603788f93SBen Walker 
22072172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Set Features - Async Event Configuration, cdw11 0x%08x\n",
220803788f93SBen Walker 		      cmd->cdw11);
22090f561837SDaniel Verkamp 	ctrlr->feat.async_event_configuration.raw = cmd->cdw11;
221068ff34bcSNick Connolly 	ctrlr->feat.async_event_configuration.bits.reserved1 = 0;
221168ff34bcSNick Connolly 	ctrlr->feat.async_event_configuration.bits.reserved2 = 0;
221203788f93SBen Walker 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
221303788f93SBen Walker }
221403788f93SBen Walker 
22155b4b66baSDaniel Verkamp static int
2216198fd2ceSSeth Howell nvmf_ctrlr_async_event_request(struct spdk_nvmf_request *req)
221703788f93SBen Walker {
22181d304bc5SBen Walker 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
221903788f93SBen Walker 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
22208cd9ef28SJiewei Ke 	struct spdk_nvmf_async_event_completion *pending_event;
222103788f93SBen Walker 
22222172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Async Event Request\n");
222303788f93SBen Walker 
2224b7cc4dd7SJin Yu 	/* Only SPDK_NVMF_MAX_ASYNC_EVENTS asynchronous event requests may be outstanding at a time */
2225982c25feSChangpeng Liu 	if (ctrlr->nr_aer_reqs >= SPDK_NVMF_MAX_ASYNC_EVENTS) {
22262172c432STomasz Zawadzki 		SPDK_DEBUGLOG(nvmf, "AERL exceeded\n");
222703788f93SBen Walker 		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
222803788f93SBen Walker 		rsp->status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED;
222903788f93SBen Walker 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
223003788f93SBen Walker 	}
223103788f93SBen Walker 
22328cd9ef28SJiewei Ke 	if (!STAILQ_EMPTY(&ctrlr->async_events)) {
22338cd9ef28SJiewei Ke 		pending_event = STAILQ_FIRST(&ctrlr->async_events);
22348cd9ef28SJiewei Ke 		rsp->cdw0 = pending_event->event.raw;
22358cd9ef28SJiewei Ke 		STAILQ_REMOVE(&ctrlr->async_events, pending_event, spdk_nvmf_async_event_completion, link);
22368cd9ef28SJiewei Ke 		free(pending_event);
22374fa486a1SChangpeng Liu 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
22384fa486a1SChangpeng Liu 	}
22394fa486a1SChangpeng Liu 
2240b7cc4dd7SJin Yu 	ctrlr->aer_req[ctrlr->nr_aer_reqs++] = req;
224103788f93SBen Walker 	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
224203788f93SBen Walker }
224389d35cefSDaniel Verkamp 
2244ed6e83f9SDaniel Verkamp static void
2245adc2942aSJiewei Ke nvmf_get_firmware_slot_log_page(struct iovec *iovs, int iovcnt, uint64_t offset, uint32_t length)
2246ed6e83f9SDaniel Verkamp {
2247ed6e83f9SDaniel Verkamp 	struct spdk_nvme_firmware_page fw_page;
2248ed6e83f9SDaniel Verkamp 	size_t copy_len;
2249ecc80dfcSJohn Levon 	struct spdk_iov_xfer ix;
2250adc2942aSJiewei Ke 
2251ecc80dfcSJohn Levon 	spdk_iov_xfer_init(&ix, iovs, iovcnt);
2252ed6e83f9SDaniel Verkamp 
2253ed6e83f9SDaniel Verkamp 	memset(&fw_page, 0, sizeof(fw_page));
2254ed6e83f9SDaniel Verkamp 	fw_page.afi.active_slot = 1;
2255ed6e83f9SDaniel Verkamp 	fw_page.afi.next_reset_slot = 0;
2256ed6e83f9SDaniel Verkamp 	spdk_strcpy_pad(fw_page.revision[0], FW_VERSION, sizeof(fw_page.revision[0]), ' ');
2257ed6e83f9SDaniel Verkamp 
2258ed6e83f9SDaniel Verkamp 	if (offset < sizeof(fw_page)) {
2259ed6e83f9SDaniel Verkamp 		copy_len = spdk_min(sizeof(fw_page) - offset, length);
2260ed6e83f9SDaniel Verkamp 		if (copy_len > 0) {
2261ecc80dfcSJohn Levon 			spdk_iov_xfer_from_buf(&ix, (const char *)&fw_page + offset, copy_len);
2262ed6e83f9SDaniel Verkamp 		}
2263ed6e83f9SDaniel Verkamp 	}
2264ed6e83f9SDaniel Verkamp }
2265ed6e83f9SDaniel Verkamp 
2266a9bdb1eeSJiewei Ke /*
2267a9bdb1eeSJiewei Ke  * Asynchronous Event Mask Bit
2268a9bdb1eeSJiewei Ke  */
2269a9bdb1eeSJiewei Ke enum spdk_nvme_async_event_mask_bit {
2270cc6920a4SJosh Soref 	/* Mask Namespace Change Notification */
2271a9bdb1eeSJiewei Ke 	SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGE_MASK_BIT		= 0,
2272a9bdb1eeSJiewei Ke 	/* Mask Asymmetric Namespace Access Change Notification */
2273a9bdb1eeSJiewei Ke 	SPDK_NVME_ASYNC_EVENT_ANA_CHANGE_MASK_BIT		= 1,
2274a9bdb1eeSJiewei Ke 	/* Mask Discovery Log Change Notification */
2275a9bdb1eeSJiewei Ke 	SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE_MASK_BIT	= 2,
2276a9bdb1eeSJiewei Ke 	/* Mask Reservation Log Page Available Notification */
2277a9bdb1eeSJiewei Ke 	SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL_MASK_BIT	= 3,
227886ad145bSChangpeng Liu 	/* Mask Error Event */
227986ad145bSChangpeng Liu 	SPDK_NVME_ASYNC_EVENT_ERROR_MASK_BIT			= 4,
2280a9bdb1eeSJiewei Ke 	/* 5 - 63 Reserved */
2281a9bdb1eeSJiewei Ke };
2282a9bdb1eeSJiewei Ke 
2283a9bdb1eeSJiewei Ke static inline void
2284a9bdb1eeSJiewei Ke nvmf_ctrlr_unmask_aen(struct spdk_nvmf_ctrlr *ctrlr,
2285a9bdb1eeSJiewei Ke 		      enum spdk_nvme_async_event_mask_bit mask)
2286a9bdb1eeSJiewei Ke {
2287a9bdb1eeSJiewei Ke 	ctrlr->notice_aen_mask &= ~(1 << mask);
2288a9bdb1eeSJiewei Ke }
2289a9bdb1eeSJiewei Ke 
2290a9bdb1eeSJiewei Ke static inline bool
2291a9bdb1eeSJiewei Ke nvmf_ctrlr_mask_aen(struct spdk_nvmf_ctrlr *ctrlr,
2292a9bdb1eeSJiewei Ke 		    enum spdk_nvme_async_event_mask_bit mask)
2293a9bdb1eeSJiewei Ke {
2294a9bdb1eeSJiewei Ke 	if (ctrlr->notice_aen_mask & (1 << mask)) {
2295a9bdb1eeSJiewei Ke 		return false;
2296a9bdb1eeSJiewei Ke 	} else {
2297a9bdb1eeSJiewei Ke 		ctrlr->notice_aen_mask |= (1 << mask);
2298a9bdb1eeSJiewei Ke 		return true;
2299a9bdb1eeSJiewei Ke 	}
2300a9bdb1eeSJiewei Ke }
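
/*
 * Sketch of how the two helpers above are intended to interact (their callers
 * live elsewhere in this file): before emitting a notice-type AEN, the event
 * source calls nvmf_ctrlr_mask_aen(); a false return means an earlier event for
 * the same log page is still unacknowledged and the new one is suppressed.  The
 * matching Get Log Page handler calls nvmf_ctrlr_unmask_aen() when the host reads
 * the page with RAE (Retain Asynchronous Event) cleared, re-arming that notice.
 * For example, a Namespace Attribute Changed AEN is not resent until the Changed
 * Namespace List log page is read with rae == 0.
 */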
2301a9bdb1eeSJiewei Ke 
23021c81d1afSKonrad Sztyber /* we have to use the typedef in the function declaration to appease astyle. */
23031c81d1afSKonrad Sztyber typedef enum spdk_nvme_ana_state spdk_nvme_ana_state_t;
23041c81d1afSKonrad Sztyber 
23051c81d1afSKonrad Sztyber static inline spdk_nvme_ana_state_t
23061c81d1afSKonrad Sztyber nvmf_ctrlr_get_ana_state(struct spdk_nvmf_ctrlr *ctrlr, uint32_t anagrpid)
23071c81d1afSKonrad Sztyber {
2308c777cfa7SShuhei Matsumoto 	if (!ctrlr->subsys->flags.ana_reporting) {
2309c777cfa7SShuhei Matsumoto 		return SPDK_NVME_ANA_OPTIMIZED_STATE;
2310c777cfa7SShuhei Matsumoto 	}
2311c777cfa7SShuhei Matsumoto 
23121c81d1afSKonrad Sztyber 	if (spdk_unlikely(ctrlr->listener == NULL)) {
23131c81d1afSKonrad Sztyber 		return SPDK_NVME_ANA_INACCESSIBLE_STATE;
23141c81d1afSKonrad Sztyber 	}
23151c81d1afSKonrad Sztyber 
23161c81d1afSKonrad Sztyber 	assert(anagrpid - 1 < ctrlr->subsys->max_nsid);
23171c81d1afSKonrad Sztyber 	return ctrlr->listener->ana_state[anagrpid - 1];
23181c81d1afSKonrad Sztyber }
23191c81d1afSKonrad Sztyber 
23201c81d1afSKonrad Sztyber static spdk_nvme_ana_state_t
23211c81d1afSKonrad Sztyber nvmf_ctrlr_get_ana_state_from_nsid(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
23221c81d1afSKonrad Sztyber {
23231c81d1afSKonrad Sztyber 	struct spdk_nvmf_ns *ns;
23241c81d1afSKonrad Sztyber 
23251c81d1afSKonrad Sztyber 	/* We do not have NVM subsystem specific ANA state. Hence, if the NSID is
23261c81d1afSKonrad Sztyber 	 * SPDK_NVMF_GLOBAL_NS_TAG, invalid, or refers to an inactive namespace, return
23271c81d1afSKonrad Sztyber 	 * the optimized state.
23281c81d1afSKonrad Sztyber 	 */
232905859ef1SJim Harris 	ns = nvmf_ctrlr_get_ns(ctrlr, nsid);
23301c81d1afSKonrad Sztyber 	if (ns == NULL) {
23311c81d1afSKonrad Sztyber 		return SPDK_NVME_ANA_OPTIMIZED_STATE;
23321c81d1afSKonrad Sztyber 	}
23331c81d1afSKonrad Sztyber 
23341c81d1afSKonrad Sztyber 	return nvmf_ctrlr_get_ana_state(ctrlr, ns->anagrpid);
23351c81d1afSKonrad Sztyber }
23361c81d1afSKonrad Sztyber 
2337332e846fSMonica Kenguva static void
233886ad145bSChangpeng Liu nvmf_get_error_log_page(struct spdk_nvmf_ctrlr *ctrlr, struct iovec *iovs, int iovcnt,
233986ad145bSChangpeng Liu 			uint64_t offset, uint32_t length, uint32_t rae)
234086ad145bSChangpeng Liu {
234186ad145bSChangpeng Liu 	if (!rae) {
234286ad145bSChangpeng Liu 		nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ERROR_MASK_BIT);
234386ad145bSChangpeng Liu 	}
234486ad145bSChangpeng Liu 
234586ad145bSChangpeng Liu 	/* TODO: actually fill out log page data */
234686ad145bSChangpeng Liu }
234786ad145bSChangpeng Liu 
234886ad145bSChangpeng Liu static void
2349d07c581bSJiewei Ke nvmf_get_ana_log_page(struct spdk_nvmf_ctrlr *ctrlr, struct iovec *iovs, int iovcnt,
2350568c5c3bSAlex Michon 		      uint64_t offset, uint32_t length, uint32_t rae, uint32_t rgo)
2351332e846fSMonica Kenguva {
2352332e846fSMonica Kenguva 	struct spdk_nvme_ana_page ana_hdr;
235307bfc3cbSShuhei Matsumoto 	struct spdk_nvme_ana_group_descriptor ana_desc;
2354d07c581bSJiewei Ke 	size_t copy_len, copied_len;
235507bfc3cbSShuhei Matsumoto 	uint32_t num_anagrp = 0, anagrpid;
2356332e846fSMonica Kenguva 	struct spdk_nvmf_ns *ns;
2357ecc80dfcSJohn Levon 	struct spdk_iov_xfer ix;
2358d07c581bSJiewei Ke 
2359ecc80dfcSJohn Levon 	spdk_iov_xfer_init(&ix, iovs, iovcnt);
2360332e846fSMonica Kenguva 
2361332e846fSMonica Kenguva 	if (length == 0) {
236207bfc3cbSShuhei Matsumoto 		goto done;
2363332e846fSMonica Kenguva 	}
2364332e846fSMonica Kenguva 
2365332e846fSMonica Kenguva 	if (offset >= sizeof(ana_hdr)) {
2366332e846fSMonica Kenguva 		offset -= sizeof(ana_hdr);
2367332e846fSMonica Kenguva 	} else {
236807bfc3cbSShuhei Matsumoto 		for (anagrpid = 1; anagrpid <= ctrlr->subsys->max_nsid; anagrpid++) {
236907bfc3cbSShuhei Matsumoto 			if (ctrlr->subsys->ana_group[anagrpid - 1] > 0) {
237007bfc3cbSShuhei Matsumoto 				num_anagrp++;
237107bfc3cbSShuhei Matsumoto 			}
2372332e846fSMonica Kenguva 		}
2373332e846fSMonica Kenguva 
2374332e846fSMonica Kenguva 		memset(&ana_hdr, 0, sizeof(ana_hdr));
2375332e846fSMonica Kenguva 
237607bfc3cbSShuhei Matsumoto 		ana_hdr.num_ana_group_desc = num_anagrp;
2377332e846fSMonica Kenguva 		/* TODO: Support Change Count. */
2378332e846fSMonica Kenguva 		ana_hdr.change_count = 0;
2379332e846fSMonica Kenguva 
2380332e846fSMonica Kenguva 		copy_len = spdk_min(sizeof(ana_hdr) - offset, length);
2381ecc80dfcSJohn Levon 		copied_len = spdk_iov_xfer_from_buf(&ix, (const char *)&ana_hdr + offset, copy_len);
2382d07c581bSJiewei Ke 		assert(copied_len == copy_len);
2383d07c581bSJiewei Ke 		length -= copied_len;
2384332e846fSMonica Kenguva 		offset = 0;
2385332e846fSMonica Kenguva 	}
2386332e846fSMonica Kenguva 
2387332e846fSMonica Kenguva 	if (length == 0) {
238807bfc3cbSShuhei Matsumoto 		goto done;
2389332e846fSMonica Kenguva 	}
2390332e846fSMonica Kenguva 
239107bfc3cbSShuhei Matsumoto 	for (anagrpid = 1; anagrpid <= ctrlr->subsys->max_nsid; anagrpid++) {
239207bfc3cbSShuhei Matsumoto 		if (ctrlr->subsys->ana_group[anagrpid - 1] == 0) {
2393332e846fSMonica Kenguva 			continue;
2394332e846fSMonica Kenguva 		}
2395332e846fSMonica Kenguva 
239607bfc3cbSShuhei Matsumoto 		if (offset >= sizeof(ana_desc)) {
239707bfc3cbSShuhei Matsumoto 			offset -= sizeof(ana_desc);
239807bfc3cbSShuhei Matsumoto 		} else {
239907bfc3cbSShuhei Matsumoto 			memset(&ana_desc, 0, sizeof(ana_desc));
2400332e846fSMonica Kenguva 
240107bfc3cbSShuhei Matsumoto 			ana_desc.ana_group_id = anagrpid;
2402568c5c3bSAlex Michon 			if (rgo) {
2403568c5c3bSAlex Michon 				ana_desc.num_of_nsid = 0;
2404568c5c3bSAlex Michon 			} else {
240507bfc3cbSShuhei Matsumoto 				ana_desc.num_of_nsid = ctrlr->subsys->ana_group[anagrpid - 1];
2406568c5c3bSAlex Michon 			}
24071c81d1afSKonrad Sztyber 			ana_desc.ana_state = nvmf_ctrlr_get_ana_state(ctrlr, anagrpid);
2408332e846fSMonica Kenguva 
240907bfc3cbSShuhei Matsumoto 			copy_len = spdk_min(sizeof(ana_desc) - offset, length);
2410ecc80dfcSJohn Levon 			copied_len = spdk_iov_xfer_from_buf(&ix, (const char *)&ana_desc + offset,
241107bfc3cbSShuhei Matsumoto 							    copy_len);
2412d07c581bSJiewei Ke 			assert(copied_len == copy_len);
2413d07c581bSJiewei Ke 			length -= copied_len;
2414332e846fSMonica Kenguva 			offset = 0;
2415332e846fSMonica Kenguva 
2416332e846fSMonica Kenguva 			if (length == 0) {
2417a9bdb1eeSJiewei Ke 				goto done;
2418332e846fSMonica Kenguva 			}
2419332e846fSMonica Kenguva 		}
2420a9bdb1eeSJiewei Ke 
2421568c5c3bSAlex Michon 		if (rgo) {
2422568c5c3bSAlex Michon 			continue;
2423568c5c3bSAlex Michon 		}
2424568c5c3bSAlex Michon 
242507bfc3cbSShuhei Matsumoto 		/* TODO: Revisit the O(n^2) cost here if subsystems with
242607bfc3cbSShuhei Matsumoto 		 * many namespaces become common in the future.
242707bfc3cbSShuhei Matsumoto 		 */
242807bfc3cbSShuhei Matsumoto 		for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns != NULL;
242907bfc3cbSShuhei Matsumoto 		     ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) {
243007bfc3cbSShuhei Matsumoto 			if (ns->anagrpid != anagrpid) {
243107bfc3cbSShuhei Matsumoto 				continue;
243207bfc3cbSShuhei Matsumoto 			}
243307bfc3cbSShuhei Matsumoto 
243407bfc3cbSShuhei Matsumoto 			if (offset >= sizeof(uint32_t)) {
243507bfc3cbSShuhei Matsumoto 				offset -= sizeof(uint32_t);
243607bfc3cbSShuhei Matsumoto 				continue;
243707bfc3cbSShuhei Matsumoto 			}
243807bfc3cbSShuhei Matsumoto 
243907bfc3cbSShuhei Matsumoto 			copy_len = spdk_min(sizeof(uint32_t) - offset, length);
2440ecc80dfcSJohn Levon 			copied_len = spdk_iov_xfer_from_buf(&ix, (const char *)&ns->nsid + offset,
244107bfc3cbSShuhei Matsumoto 							    copy_len);
244207bfc3cbSShuhei Matsumoto 			assert(copied_len == copy_len);
244307bfc3cbSShuhei Matsumoto 			length -= copied_len;
244407bfc3cbSShuhei Matsumoto 			offset = 0;
244507bfc3cbSShuhei Matsumoto 
244607bfc3cbSShuhei Matsumoto 			if (length == 0) {
244707bfc3cbSShuhei Matsumoto 				goto done;
244807bfc3cbSShuhei Matsumoto 			}
244907bfc3cbSShuhei Matsumoto 		}
245007bfc3cbSShuhei Matsumoto 	}
245107bfc3cbSShuhei Matsumoto 
2452a9bdb1eeSJiewei Ke done:
2453a9bdb1eeSJiewei Ke 	if (!rae) {
2454a9bdb1eeSJiewei Ke 		nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ANA_CHANGE_MASK_BIT);
2455a9bdb1eeSJiewei Ke 	}
2456332e846fSMonica Kenguva }
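
/*
 * Layout produced by nvmf_get_ana_log_page() above, for reference (a sketch, not
 * a separate definition): an spdk_nvme_ana_page header whose num_ana_group_desc
 * counts only ANA groups with at least one namespace, followed per such group by
 * an spdk_nvme_ana_group_descriptor and - unless the RGO (Return Groups Only)
 * bit was set in LSP - the uint32_t NSIDs belonging to that group:
 *
 *	[ana_hdr][desc(grp A)][nsid, nsid, ...][desc(grp B)][nsid, ...]...
 *
 * The offset/length bookkeeping lets the host read any byte range of this
 * logical layout across multiple Get Log Page commands.
 */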
2457332e846fSMonica Kenguva 
245826541489SDaniel Verkamp void
24599cb21ad6SSeth Howell nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
246026541489SDaniel Verkamp {
246126541489SDaniel Verkamp 	uint16_t max_changes = SPDK_COUNTOF(ctrlr->changed_ns_list.ns_list);
246226541489SDaniel Verkamp 	uint16_t i;
246326541489SDaniel Verkamp 	bool found = false;
246426541489SDaniel Verkamp 
246526541489SDaniel Verkamp 	for (i = 0; i < ctrlr->changed_ns_list_count; i++) {
246626541489SDaniel Verkamp 		if (ctrlr->changed_ns_list.ns_list[i] == nsid) {
246726541489SDaniel Verkamp 			/* nsid is already in the list */
246826541489SDaniel Verkamp 			found = true;
246926541489SDaniel Verkamp 			break;
247026541489SDaniel Verkamp 		}
247126541489SDaniel Verkamp 	}
247226541489SDaniel Verkamp 
247326541489SDaniel Verkamp 	if (!found) {
247426541489SDaniel Verkamp 		if (ctrlr->changed_ns_list_count == max_changes) {
247526541489SDaniel Verkamp 			/* Out of space - set first entry to FFFFFFFFh and zero-fill the rest. */
247626541489SDaniel Verkamp 			ctrlr->changed_ns_list.ns_list[0] = 0xFFFFFFFFu;
247726541489SDaniel Verkamp 			for (i = 1; i < max_changes; i++) {
247826541489SDaniel Verkamp 				ctrlr->changed_ns_list.ns_list[i] = 0;
247926541489SDaniel Verkamp 			}
248026541489SDaniel Verkamp 		} else {
248126541489SDaniel Verkamp 			ctrlr->changed_ns_list.ns_list[ctrlr->changed_ns_list_count++] = nsid;
248226541489SDaniel Verkamp 		}
248326541489SDaniel Verkamp 	}
248426541489SDaniel Verkamp }
248526541489SDaniel Verkamp 
248626541489SDaniel Verkamp static void
2487198fd2ceSSeth Howell nvmf_get_changed_ns_list_log_page(struct spdk_nvmf_ctrlr *ctrlr,
2488adc2942aSJiewei Ke 				  struct iovec *iovs, int iovcnt, uint64_t offset, uint32_t length, uint32_t rae)
248926541489SDaniel Verkamp {
249026541489SDaniel Verkamp 	size_t copy_length;
2491ecc80dfcSJohn Levon 	struct spdk_iov_xfer ix;
2492adc2942aSJiewei Ke 
2493ecc80dfcSJohn Levon 	spdk_iov_xfer_init(&ix, iovs, iovcnt);
249426541489SDaniel Verkamp 
249526541489SDaniel Verkamp 	if (offset < sizeof(ctrlr->changed_ns_list)) {
249626541489SDaniel Verkamp 		copy_length = spdk_min(length, sizeof(ctrlr->changed_ns_list) - offset);
249726541489SDaniel Verkamp 		if (copy_length) {
2498ecc80dfcSJohn Levon 			spdk_iov_xfer_from_buf(&ix, (char *)&ctrlr->changed_ns_list + offset, copy_length);
249926541489SDaniel Verkamp 		}
250026541489SDaniel Verkamp 	}
250126541489SDaniel Verkamp 
250226541489SDaniel Verkamp 	/* Clear log page each time it is read */
250326541489SDaniel Verkamp 	ctrlr->changed_ns_list_count = 0;
250426541489SDaniel Verkamp 	memset(&ctrlr->changed_ns_list, 0, sizeof(ctrlr->changed_ns_list));
2505a9bdb1eeSJiewei Ke 
2506a9bdb1eeSJiewei Ke 	if (!rae) {
2507a9bdb1eeSJiewei Ke 		nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGE_MASK_BIT);
2508a9bdb1eeSJiewei Ke 	}
250926541489SDaniel Verkamp }
251026541489SDaniel Verkamp 
251161d379fdSDaniel Verkamp /* The structure can be modified if we provide support for other commands in the future */
251261d379fdSDaniel Verkamp static const struct spdk_nvme_cmds_and_effect_log_page g_cmds_and_effect_log_page = {
251361d379fdSDaniel Verkamp 	.admin_cmds_supported = {
251461d379fdSDaniel Verkamp 		/* CSUPP, LBCC, NCC, NIC, CCC, CSE */
251561d379fdSDaniel Verkamp 		/* Get Log Page */
251661d379fdSDaniel Verkamp 		[SPDK_NVME_OPC_GET_LOG_PAGE]		= {1, 0, 0, 0, 0, 0, 0, 0},
251761d379fdSDaniel Verkamp 		/* Identify */
251861d379fdSDaniel Verkamp 		[SPDK_NVME_OPC_IDENTIFY]		= {1, 0, 0, 0, 0, 0, 0, 0},
251961d379fdSDaniel Verkamp 		/* Abort */
252061d379fdSDaniel Verkamp 		[SPDK_NVME_OPC_ABORT]			= {1, 0, 0, 0, 0, 0, 0, 0},
252161d379fdSDaniel Verkamp 		/* Set Features */
252261d379fdSDaniel Verkamp 		[SPDK_NVME_OPC_SET_FEATURES]		= {1, 0, 0, 0, 0, 0, 0, 0},
252361d379fdSDaniel Verkamp 		/* Get Features */
252461d379fdSDaniel Verkamp 		[SPDK_NVME_OPC_GET_FEATURES]		= {1, 0, 0, 0, 0, 0, 0, 0},
252561d379fdSDaniel Verkamp 		/* Async Event Request */
252661d379fdSDaniel Verkamp 		[SPDK_NVME_OPC_ASYNC_EVENT_REQUEST]	= {1, 0, 0, 0, 0, 0, 0, 0},
252761d379fdSDaniel Verkamp 		/* Keep Alive */
252861d379fdSDaniel Verkamp 		[SPDK_NVME_OPC_KEEP_ALIVE]		= {1, 0, 0, 0, 0, 0, 0, 0},
252961d379fdSDaniel Verkamp 	},
253061d379fdSDaniel Verkamp 	.io_cmds_supported = {
253161d379fdSDaniel Verkamp 		/* FLUSH */
253261d379fdSDaniel Verkamp 		[SPDK_NVME_OPC_FLUSH]			= {1, 1, 0, 0, 0, 0, 0, 0},
253361d379fdSDaniel Verkamp 		/* WRITE */
253461d379fdSDaniel Verkamp 		[SPDK_NVME_OPC_WRITE]			= {1, 1, 0, 0, 0, 0, 0, 0},
253561d379fdSDaniel Verkamp 		/* READ */
253661d379fdSDaniel Verkamp 		[SPDK_NVME_OPC_READ]			= {1, 0, 0, 0, 0, 0, 0, 0},
253761d379fdSDaniel Verkamp 		/* WRITE ZEROES */
253861d379fdSDaniel Verkamp 		[SPDK_NVME_OPC_WRITE_ZEROES]		= {1, 1, 0, 0, 0, 0, 0, 0},
253961d379fdSDaniel Verkamp 		/* DATASET MANAGEMENT */
254061d379fdSDaniel Verkamp 		[SPDK_NVME_OPC_DATASET_MANAGEMENT]	= {1, 1, 0, 0, 0, 0, 0, 0},
2541941d9e7aSMaciej Szwed 		/* COMPARE */
2542941d9e7aSMaciej Szwed 		[SPDK_NVME_OPC_COMPARE]			= {1, 0, 0, 0, 0, 0, 0, 0},
25432aa7fa70SDennis Maisenbacher 		/* ZONE MANAGEMENT SEND */
25442aa7fa70SDennis Maisenbacher 		[SPDK_NVME_OPC_ZONE_MGMT_SEND]		= {1, 1, 0, 0, 0, 0, 0, 0},
25452aa7fa70SDennis Maisenbacher 		/* ZONE MANAGEMENT RECEIVE */
25462aa7fa70SDennis Maisenbacher 		[SPDK_NVME_OPC_ZONE_MGMT_RECV]		= {1, 0, 0, 0, 0, 0, 0, 0},
25475f76f79cSKonrad Sztyber 		/* ZONE APPEND */
25485f76f79cSKonrad Sztyber 		[SPDK_NVME_OPC_ZONE_APPEND]		= {1, 1, 0, 0, 0, 0, 0, 0},
2549d0516312SRui Chang 		/* COPY */
2550d0516312SRui Chang 		[SPDK_NVME_OPC_COPY]			= {1, 1, 0, 0, 0, 0, 0, 0},
255161d379fdSDaniel Verkamp 	},
255261d379fdSDaniel Verkamp };
255361d379fdSDaniel Verkamp 
255461d379fdSDaniel Verkamp static void
25552aa7fa70SDennis Maisenbacher nvmf_get_cmds_and_effects_log_page(struct spdk_nvmf_ctrlr *ctrlr, struct iovec *iovs, int iovcnt,
255661d379fdSDaniel Verkamp 				   uint64_t offset, uint32_t length)
255761d379fdSDaniel Verkamp {
255861d379fdSDaniel Verkamp 	uint32_t page_size = sizeof(struct spdk_nvme_cmds_and_effect_log_page);
255961d379fdSDaniel Verkamp 	size_t copy_len = 0;
25602aa7fa70SDennis Maisenbacher 	struct spdk_nvme_cmds_and_effect_log_page cmds_and_effect_log_page = g_cmds_and_effect_log_page;
25615f76f79cSKonrad Sztyber 	struct spdk_nvme_cmds_and_effect_entry zero = {};
2562ecc80dfcSJohn Levon 	struct spdk_iov_xfer ix;
2563adc2942aSJiewei Ke 
2564ff69ab47SKonrad Sztyber 	if (!ctrlr->cdata.oncs.write_zeroes || !nvmf_ctrlr_write_zeroes_supported(ctrlr)) {
2565ff69ab47SKonrad Sztyber 		cmds_and_effect_log_page.io_cmds_supported[SPDK_NVME_OPC_WRITE_ZEROES] = zero;
2566ff69ab47SKonrad Sztyber 	}
2567ff69ab47SKonrad Sztyber 	if (!ctrlr->cdata.oncs.dsm || !nvmf_ctrlr_dsm_supported(ctrlr)) {
2568ff69ab47SKonrad Sztyber 		cmds_and_effect_log_page.io_cmds_supported[SPDK_NVME_OPC_DATASET_MANAGEMENT] = zero;
2569ff69ab47SKonrad Sztyber 	}
2570ff69ab47SKonrad Sztyber 	if (!ctrlr->cdata.oncs.compare) {
2571ff69ab47SKonrad Sztyber 		cmds_and_effect_log_page.io_cmds_supported[SPDK_NVME_OPC_COMPARE] = zero;
2572ff69ab47SKonrad Sztyber 	}
2573d1b37c16SKonrad Sztyber 	if (!nvmf_subsystem_has_zns_iocs(ctrlr->subsys)) {
2574d1b37c16SKonrad Sztyber 		cmds_and_effect_log_page.io_cmds_supported[SPDK_NVME_OPC_ZONE_MGMT_SEND] = zero;
2575d1b37c16SKonrad Sztyber 		cmds_and_effect_log_page.io_cmds_supported[SPDK_NVME_OPC_ZONE_MGMT_RECV] = zero;
2576d1b37c16SKonrad Sztyber 	}
25775f76f79cSKonrad Sztyber 	if (!nvmf_subsystem_zone_append_supported(ctrlr->subsys)) {
25785f76f79cSKonrad Sztyber 		cmds_and_effect_log_page.io_cmds_supported[SPDK_NVME_OPC_ZONE_APPEND] = zero;
25792aa7fa70SDennis Maisenbacher 	}
2580ff69ab47SKonrad Sztyber 	if (!ctrlr->cdata.oncs.copy) {
2581ff69ab47SKonrad Sztyber 		cmds_and_effect_log_page.io_cmds_supported[SPDK_NVME_OPC_COPY] = zero;
2582ff69ab47SKonrad Sztyber 	}
25835f76f79cSKonrad Sztyber 
25845f76f79cSKonrad Sztyber 	spdk_iov_xfer_init(&ix, iovs, iovcnt);
25855f76f79cSKonrad Sztyber 	if (offset < page_size) {
258661d379fdSDaniel Verkamp 		copy_len = spdk_min(page_size - offset, length);
2587ecc80dfcSJohn Levon 		spdk_iov_xfer_from_buf(&ix, (char *)(&cmds_and_effect_log_page) + offset, copy_len);
258861d379fdSDaniel Verkamp 	}
258961d379fdSDaniel Verkamp }
259061d379fdSDaniel Verkamp 
2591468c6c18SChangpeng Liu static void
2592198fd2ceSSeth Howell nvmf_get_reservation_notification_log_page(struct spdk_nvmf_ctrlr *ctrlr,
2593adc2942aSJiewei Ke 		struct iovec *iovs, int iovcnt, uint64_t offset, uint32_t length, uint32_t rae)
2594468c6c18SChangpeng Liu {
2595468c6c18SChangpeng Liu 	uint32_t unit_log_len, avail_log_len, next_pos, copy_len;
2596468c6c18SChangpeng Liu 	struct spdk_nvmf_reservation_log *log, *log_tmp;
2597ecc80dfcSJohn Levon 	struct spdk_iov_xfer ix;
2598adc2942aSJiewei Ke 
2599ecc80dfcSJohn Levon 	spdk_iov_xfer_init(&ix, iovs, iovcnt);
2600468c6c18SChangpeng Liu 
2601468c6c18SChangpeng Liu 	unit_log_len = sizeof(struct spdk_nvme_reservation_notification_log);
2602adc2942aSJiewei Ke 	/* No available log, return zeroed log pages */
2603468c6c18SChangpeng Liu 	if (!ctrlr->num_avail_log_pages) {
2604468c6c18SChangpeng Liu 		return;
2605468c6c18SChangpeng Liu 	}
2606468c6c18SChangpeng Liu 
2607468c6c18SChangpeng Liu 	avail_log_len = ctrlr->num_avail_log_pages * unit_log_len;
2608468c6c18SChangpeng Liu 	if (offset >= avail_log_len) {
2609468c6c18SChangpeng Liu 		return;
2610468c6c18SChangpeng Liu 	}
2611468c6c18SChangpeng Liu 
2612fd661859Syidong0635 	next_pos = 0;
2613468c6c18SChangpeng Liu 	TAILQ_FOREACH_SAFE(log, &ctrlr->log_head, link, log_tmp) {
2614468c6c18SChangpeng Liu 		TAILQ_REMOVE(&ctrlr->log_head, log, link);
2615468c6c18SChangpeng Liu 		ctrlr->num_avail_log_pages--;
2616468c6c18SChangpeng Liu 
2617468c6c18SChangpeng Liu 		next_pos += unit_log_len;
2618468c6c18SChangpeng Liu 		if (next_pos > offset) {
2619468c6c18SChangpeng Liu 			copy_len = spdk_min(next_pos - offset, length);
2620ecc80dfcSJohn Levon 			spdk_iov_xfer_from_buf(&ix, &log->log, copy_len);
2621468c6c18SChangpeng Liu 			length -= copy_len;
2622468c6c18SChangpeng Liu 			offset += copy_len;
2623468c6c18SChangpeng Liu 		}
2624468c6c18SChangpeng Liu 		free(log);
2625468c6c18SChangpeng Liu 
2626468c6c18SChangpeng Liu 		if (length == 0) {
2627468c6c18SChangpeng Liu 			break;
2628468c6c18SChangpeng Liu 		}
2629468c6c18SChangpeng Liu 	}
2630a9bdb1eeSJiewei Ke 
2631a9bdb1eeSJiewei Ke 	if (!rae) {
2632a9bdb1eeSJiewei Ke 		nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL_MASK_BIT);
2633a9bdb1eeSJiewei Ke 	}
2634468c6c18SChangpeng Liu 	return;
2635468c6c18SChangpeng Liu }
2636468c6c18SChangpeng Liu 
26375b4b66baSDaniel Verkamp static int
2638198fd2ceSSeth Howell nvmf_ctrlr_get_log_page(struct spdk_nvmf_request *req)
263989d35cefSDaniel Verkamp {
264026541489SDaniel Verkamp 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
264126541489SDaniel Verkamp 	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
264289d35cefSDaniel Verkamp 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
264389d35cefSDaniel Verkamp 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
26443d8904c6SAlexey Marchuk 	struct spdk_nvme_transport_id cmd_source_trid;
264589d35cefSDaniel Verkamp 	uint64_t offset, len;
2646a9bdb1eeSJiewei Ke 	uint32_t rae, numdl, numdu;
264789d35cefSDaniel Verkamp 	uint8_t lid;
264889d35cefSDaniel Verkamp 
264949c0d28aSJohn Levon 	if (req->iovcnt < 1) {
26509cb78e1aSJim Harris 		SPDK_DEBUGLOG(nvmf, "get log command with no buffer\n");
265189d35cefSDaniel Verkamp 		response->status.sct = SPDK_NVME_SCT_GENERIC;
265289d35cefSDaniel Verkamp 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
265389d35cefSDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
265489d35cefSDaniel Verkamp 	}
265589d35cefSDaniel Verkamp 
265689d35cefSDaniel Verkamp 	offset = (uint64_t)cmd->cdw12 | ((uint64_t)cmd->cdw13 << 32);
265789d35cefSDaniel Verkamp 	if (offset & 3) {
265889d35cefSDaniel Verkamp 		SPDK_ERRLOG("Invalid log page offset 0x%" PRIx64 "\n", offset);
265989d35cefSDaniel Verkamp 		response->status.sct = SPDK_NVME_SCT_GENERIC;
266089d35cefSDaniel Verkamp 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
266189d35cefSDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
266289d35cefSDaniel Verkamp 	}
266389d35cefSDaniel Verkamp 
2664a9bdb1eeSJiewei Ke 	rae = cmd->cdw10_bits.get_log_page.rae;
26651fea1fccSChangpeng Liu 	numdl = cmd->cdw10_bits.get_log_page.numdl;
26660c9057f0SChangpeng Liu 	numdu = cmd->cdw11_bits.get_log_page.numdu;
266789d35cefSDaniel Verkamp 	len = ((numdu << 16) + numdl + (uint64_t)1) * 4;
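	/*
	 * NUMD is a 0-based dword count split across NUMDU/NUMDL; e.g. numdu = 0 and
	 * numdl = 0x3FF request 0x400 dwords, i.e. len = 4096 bytes.
	 */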
266889d35cefSDaniel Verkamp 	if (len > req->length) {
266989d35cefSDaniel Verkamp 		SPDK_ERRLOG("Get log page: len (%" PRIu64 ") > buf size (%u)\n",
267089d35cefSDaniel Verkamp 			    len, req->length);
267189d35cefSDaniel Verkamp 		response->status.sct = SPDK_NVME_SCT_GENERIC;
267289d35cefSDaniel Verkamp 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
267389d35cefSDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
267489d35cefSDaniel Verkamp 	}
267589d35cefSDaniel Verkamp 
26761fea1fccSChangpeng Liu 	lid = cmd->cdw10_bits.get_log_page.lid;
2677a9bdb1eeSJiewei Ke 	SPDK_DEBUGLOG(nvmf, "Get log page: LID=0x%02X offset=0x%" PRIx64 " len=0x%" PRIx64 " rae=%u\n",
2678a9bdb1eeSJiewei Ke 		      lid, offset, len, rae);
267989d35cefSDaniel Verkamp 
26807efdf905SSlawomir Ptak 	if (spdk_nvmf_subsystem_is_discovery(subsystem)) {
268189d35cefSDaniel Verkamp 		switch (lid) {
268289d35cefSDaniel Verkamp 		case SPDK_NVME_LOG_DISCOVERY:
26833d8904c6SAlexey Marchuk 			if (spdk_nvmf_qpair_get_listen_trid(req->qpair, &cmd_source_trid)) {
26843d8904c6SAlexey Marchuk 				SPDK_ERRLOG("Failed to get LOG_DISCOVERY source trid\n");
26853d8904c6SAlexey Marchuk 				response->status.sct = SPDK_NVME_SCT_GENERIC;
26863d8904c6SAlexey Marchuk 				response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
26873d8904c6SAlexey Marchuk 				return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
26883d8904c6SAlexey Marchuk 			}
26893d8904c6SAlexey Marchuk 			nvmf_get_discovery_log_page(subsystem->tgt, ctrlr->hostnqn, req->iov, req->iovcnt,
26903d8904c6SAlexey Marchuk 						    offset, len, &cmd_source_trid);
2691a9bdb1eeSJiewei Ke 			if (!rae) {
2692a9bdb1eeSJiewei Ke 				nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE_MASK_BIT);
2693a9bdb1eeSJiewei Ke 			}
269489d35cefSDaniel Verkamp 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
269589d35cefSDaniel Verkamp 		default:
269689d35cefSDaniel Verkamp 			goto invalid_log_page;
269789d35cefSDaniel Verkamp 		}
269889d35cefSDaniel Verkamp 	} else {
26997a2a588cSChangpeng Liu 		if (offset > len) {
27007a2a588cSChangpeng Liu 			SPDK_ERRLOG("Get log page: offset (%" PRIu64 ") > len (%" PRIu64 ")\n",
27017a2a588cSChangpeng Liu 				    offset, len);
27027a2a588cSChangpeng Liu 			response->status.sct = SPDK_NVME_SCT_GENERIC;
27037a2a588cSChangpeng Liu 			response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
27047a2a588cSChangpeng Liu 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
27057a2a588cSChangpeng Liu 		}
27067a2a588cSChangpeng Liu 
270789d35cefSDaniel Verkamp 		switch (lid) {
270889d35cefSDaniel Verkamp 		case SPDK_NVME_LOG_ERROR:
270986ad145bSChangpeng Liu 			nvmf_get_error_log_page(ctrlr, req->iov, req->iovcnt, offset, len, rae);
271086ad145bSChangpeng Liu 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
271189d35cefSDaniel Verkamp 		case SPDK_NVME_LOG_HEALTH_INFORMATION:
271289d35cefSDaniel Verkamp 			/* TODO: actually fill out log page data */
271389d35cefSDaniel Verkamp 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
2714ed6e83f9SDaniel Verkamp 		case SPDK_NVME_LOG_FIRMWARE_SLOT:
2715adc2942aSJiewei Ke 			nvmf_get_firmware_slot_log_page(req->iov, req->iovcnt, offset, len);
2716ed6e83f9SDaniel Verkamp 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
2717332e846fSMonica Kenguva 		case SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS:
27185e4e4bc4SBen Walker 			if (subsystem->flags.ana_reporting) {
2719568c5c3bSAlex Michon 				uint32_t rgo = cmd->cdw10_bits.get_log_page.lsp & 1;
2720568c5c3bSAlex Michon 				nvmf_get_ana_log_page(ctrlr, req->iov, req->iovcnt, offset, len, rae, rgo);
2721332e846fSMonica Kenguva 				return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
27226f226573SShuhei Matsumoto 			} else {
27236f226573SShuhei Matsumoto 				goto invalid_log_page;
27246f226573SShuhei Matsumoto 			}
2725c5cd53ccSparam 		case SPDK_NVME_LOG_COMMAND_EFFECTS_LOG:
27262aa7fa70SDennis Maisenbacher 			nvmf_get_cmds_and_effects_log_page(ctrlr, req->iov, req->iovcnt, offset, len);
2727c5cd53ccSparam 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
272826541489SDaniel Verkamp 		case SPDK_NVME_LOG_CHANGED_NS_LIST:
2729adc2942aSJiewei Ke 			nvmf_get_changed_ns_list_log_page(ctrlr, req->iov, req->iovcnt, offset, len, rae);
273026541489SDaniel Verkamp 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
2731468c6c18SChangpeng Liu 		case SPDK_NVME_LOG_RESERVATION_NOTIFICATION:
2732adc2942aSJiewei Ke 			nvmf_get_reservation_notification_log_page(ctrlr, req->iov, req->iovcnt, offset, len, rae);
2733468c6c18SChangpeng Liu 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
273489d35cefSDaniel Verkamp 		default:
273589d35cefSDaniel Verkamp 			goto invalid_log_page;
273689d35cefSDaniel Verkamp 		}
273789d35cefSDaniel Verkamp 	}
273889d35cefSDaniel Verkamp 
273989d35cefSDaniel Verkamp invalid_log_page:
27407d136a20SChangpeng Liu 	SPDK_INFOLOG(nvmf, "Unsupported Get Log Page 0x%02X\n", lid);
274189d35cefSDaniel Verkamp 	response->status.sct = SPDK_NVME_SCT_GENERIC;
274289d35cefSDaniel Verkamp 	response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
274389d35cefSDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
274489d35cefSDaniel Verkamp }
2745bf6caa75SDaniel Verkamp 
274628092d2fSDennis Maisenbacher static struct spdk_nvmf_ns *
274705859ef1SJim Harris _nvmf_ctrlr_get_ns_safe(struct spdk_nvmf_ctrlr *ctrlr,
274828092d2fSDennis Maisenbacher 			uint32_t nsid,
274928092d2fSDennis Maisenbacher 			struct spdk_nvme_cpl *rsp)
275028092d2fSDennis Maisenbacher {
275128092d2fSDennis Maisenbacher 	struct spdk_nvmf_ns *ns;
275205859ef1SJim Harris 	if (nsid == 0 || nsid > ctrlr->subsys->max_nsid) {
275328092d2fSDennis Maisenbacher 		SPDK_ERRLOG("Identify Namespace for invalid NSID %u\n", nsid);
275428092d2fSDennis Maisenbacher 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
275528092d2fSDennis Maisenbacher 		rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
275628092d2fSDennis Maisenbacher 		return NULL;
275728092d2fSDennis Maisenbacher 	}
275828092d2fSDennis Maisenbacher 
275905859ef1SJim Harris 	ns = nvmf_ctrlr_get_ns(ctrlr, nsid);
276028092d2fSDennis Maisenbacher 	if (ns == NULL || ns->bdev == NULL) {
276128092d2fSDennis Maisenbacher 		/*
276228092d2fSDennis Maisenbacher 		 * Inactive namespaces should return a zero filled data structure.
276328092d2fSDennis Maisenbacher 		 * The data buffer is already zeroed by nvmf_ctrlr_process_admin_cmd(),
276428092d2fSDennis Maisenbacher 		 * so we can just return early here.
276528092d2fSDennis Maisenbacher 		 */
276628092d2fSDennis Maisenbacher 		SPDK_DEBUGLOG(nvmf, "Identify Namespace for inactive NSID %u\n", nsid);
276728092d2fSDennis Maisenbacher 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
276828092d2fSDennis Maisenbacher 		rsp->status.sc = SPDK_NVME_SC_SUCCESS;
276928092d2fSDennis Maisenbacher 		return NULL;
277028092d2fSDennis Maisenbacher 	}
277128092d2fSDennis Maisenbacher 	return ns;
277228092d2fSDennis Maisenbacher }
277328092d2fSDennis Maisenbacher 
277405632afdSAtul Malakar static void
277505632afdSAtul Malakar nvmf_ctrlr_identify_ns(struct spdk_nvmf_ctrlr *ctrlr,
2776bf6caa75SDaniel Verkamp 		       struct spdk_nvme_cmd *cmd,
2777bf6caa75SDaniel Verkamp 		       struct spdk_nvme_cpl *rsp,
2778892c29f4SKonrad Sztyber 		       struct spdk_nvme_ns_data *nsdata,
2779892c29f4SKonrad Sztyber 		       uint32_t nsid)
2780bf6caa75SDaniel Verkamp {
2781ddb17216SPiotr Pelplinski 	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
278214451d76SDaniel Verkamp 	struct spdk_nvmf_ns *ns;
27837bbeb80aSAnkit Kumar 	uint32_t max_num_blocks, format_index;
2784785d10b5SShuhei Matsumoto 	enum spdk_nvme_ana_state ana_state;
2785bf6caa75SDaniel Verkamp 
2786892c29f4SKonrad Sztyber 	ns = _nvmf_ctrlr_get_ns_safe(ctrlr, nsid, rsp);
278728092d2fSDennis Maisenbacher 	if (ns == NULL) {
278805632afdSAtul Malakar 		return;
2789bf6caa75SDaniel Verkamp 	}
2790bf6caa75SDaniel Verkamp 
27919cb21ad6SSeth Howell 	nvmf_bdev_ctrlr_identify_ns(ns, nsdata, ctrlr->dif_insert_or_strip);
2792ddda03efSPiotr Pelplinski 
2793478f6524SAlexey Marchuk 	assert(ctrlr->admin_qpair);
27947bbeb80aSAnkit Kumar 
27957bbeb80aSAnkit Kumar 	format_index = spdk_nvme_ns_get_format_index(nsdata);
27967bbeb80aSAnkit Kumar 
2797acca82acSPiotr Pelplinski 	/* Due to a bug in the Linux kernel NVMe driver, we have to set noiob no larger than mdts */
2798acca82acSPiotr Pelplinski 	max_num_blocks = ctrlr->admin_qpair->transport->opts.max_io_size /
27997bbeb80aSAnkit Kumar 			 (1U << nsdata->lbaf[format_index].lbads);
2800acca82acSPiotr Pelplinski 	if (nsdata->noiob > max_num_blocks) {
2801acca82acSPiotr Pelplinski 		nsdata->noiob = max_num_blocks;
2802acca82acSPiotr Pelplinski 	}
2803acca82acSPiotr Pelplinski 
280413e5be0bSSwapnil Ingle 	/* Set NOWS (0-based) equal to the Controller MDTS in blocks */
280513e5be0bSSwapnil Ingle 	if (nsdata->nsfeat.optperf) {
280613e5be0bSSwapnil Ingle 		nsdata->nows = max_num_blocks - 1;
280713e5be0bSSwapnil Ingle 	}
280813e5be0bSSwapnil Ingle 
28095e4e4bc4SBen Walker 	if (subsystem->flags.ana_reporting) {
2810785d10b5SShuhei Matsumoto 		assert(ns->anagrpid - 1 < subsystem->max_nsid);
281107bfc3cbSShuhei Matsumoto 		nsdata->anagrpid = ns->anagrpid;
281268f16817SShuhei Matsumoto 
28131c81d1afSKonrad Sztyber 		ana_state = nvmf_ctrlr_get_ana_state(ctrlr, ns->anagrpid);
2814785d10b5SShuhei Matsumoto 		if (ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE ||
2815785d10b5SShuhei Matsumoto 		    ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE) {
281668f16817SShuhei Matsumoto 			nsdata->nuse = 0;
281768f16817SShuhei Matsumoto 		}
28186f226573SShuhei Matsumoto 	}
281905632afdSAtul Malakar }
282005632afdSAtul Malakar 
282105632afdSAtul Malakar int
282205632afdSAtul Malakar spdk_nvmf_ctrlr_identify_ns(struct spdk_nvmf_ctrlr *ctrlr,
282305632afdSAtul Malakar 			    struct spdk_nvme_cmd *cmd,
282405632afdSAtul Malakar 			    struct spdk_nvme_cpl *rsp,
282505632afdSAtul Malakar 			    struct spdk_nvme_ns_data *nsdata)
282605632afdSAtul Malakar {
2827892c29f4SKonrad Sztyber 	nvmf_ctrlr_identify_ns(ctrlr, cmd, rsp, nsdata, cmd->nsid);
2828332e846fSMonica Kenguva 
2829ddda03efSPiotr Pelplinski 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
2830bf6caa75SDaniel Verkamp }
2831bf6caa75SDaniel Verkamp 
2832e93902a0SMichael Haeuptle static void
283305632afdSAtul Malakar identify_ns_passthru_cb(struct spdk_nvmf_request *req)
283405632afdSAtul Malakar {
283505632afdSAtul Malakar 	struct spdk_nvme_cmd *cmd = spdk_nvmf_request_get_cmd(req);
283605632afdSAtul Malakar 	struct spdk_nvmf_ctrlr *ctrlr = spdk_nvmf_request_get_ctrlr(req);
283705632afdSAtul Malakar 	struct spdk_nvme_cpl *rsp = spdk_nvmf_request_get_response(req);
283805632afdSAtul Malakar 	struct spdk_nvme_ns_data nvmf_nsdata = {};
283905632afdSAtul Malakar 	struct spdk_nvme_ns_data nvme_nsdata = {};
284005632afdSAtul Malakar 	size_t datalen;
284105632afdSAtul Malakar 
284205632afdSAtul Malakar 	/* This is the identify data from the NVMe drive */
284305632afdSAtul Malakar 	datalen = spdk_nvmf_request_copy_to_buf(req, &nvme_nsdata,
284405632afdSAtul Malakar 						sizeof(nvme_nsdata));
28451a158640SKonrad Sztyber 	nvmf_ctrlr_identify_ns(ctrlr, cmd, rsp, &nvmf_nsdata, req->orig_nsid);
284605632afdSAtul Malakar 
284705632afdSAtul Malakar 	/* Update the fabric namespace data according to the backing SSD's namespace data */
284805632afdSAtul Malakar 	if (nvme_nsdata.nsfeat.optperf) {
284905632afdSAtul Malakar 		nvmf_nsdata.nsfeat.optperf = nvme_nsdata.nsfeat.optperf;
285005632afdSAtul Malakar 		nvmf_nsdata.npwg = nvme_nsdata.npwg;
285105632afdSAtul Malakar 		nvmf_nsdata.npwa = nvme_nsdata.npwa;
285205632afdSAtul Malakar 		nvmf_nsdata.npdg = nvme_nsdata.npdg;
285305632afdSAtul Malakar 		nvmf_nsdata.npda = nvme_nsdata.npda;
285405632afdSAtul Malakar 		nvmf_nsdata.nows = nvme_nsdata.nows;
285505632afdSAtul Malakar 	}
285605632afdSAtul Malakar 
285705632afdSAtul Malakar 	if (nvme_nsdata.nsfeat.ns_atomic_write_unit) {
285805632afdSAtul Malakar 		nvmf_nsdata.nsfeat.ns_atomic_write_unit = nvme_nsdata.nsfeat.ns_atomic_write_unit;
285905632afdSAtul Malakar 		nvmf_nsdata.nawun = nvme_nsdata.nawun;
286005632afdSAtul Malakar 		nvmf_nsdata.nawupf = nvme_nsdata.nawupf;
286105632afdSAtul Malakar 		nvmf_nsdata.nacwu = nvme_nsdata.nacwu;
286205632afdSAtul Malakar 	}
286305632afdSAtul Malakar 
286405632afdSAtul Malakar 	nvmf_nsdata.nabsn = nvme_nsdata.nabsn;
286505632afdSAtul Malakar 	nvmf_nsdata.nabo = nvme_nsdata.nabo;
286605632afdSAtul Malakar 	nvmf_nsdata.nabspf = nvme_nsdata.nabspf;
286705632afdSAtul Malakar 
286805632afdSAtul Malakar 	spdk_nvmf_request_copy_from_buf(req, &nvmf_nsdata, datalen);
286905632afdSAtul Malakar }
287005632afdSAtul Malakar 
287105632afdSAtul Malakar int
287205632afdSAtul Malakar spdk_nvmf_ctrlr_identify_ns_ext(struct spdk_nvmf_request *req)
287305632afdSAtul Malakar {
287405632afdSAtul Malakar 	struct spdk_nvme_cmd *cmd = spdk_nvmf_request_get_cmd(req);
287505632afdSAtul Malakar 	struct spdk_nvmf_ctrlr *ctrlr = spdk_nvmf_request_get_ctrlr(req);
28761a158640SKonrad Sztyber 	struct spdk_nvmf_ns *ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid);
287705632afdSAtul Malakar 	struct spdk_nvme_cpl *rsp = spdk_nvmf_request_get_response(req);
287805632afdSAtul Malakar 	struct spdk_bdev *bdev;
287905632afdSAtul Malakar 	struct spdk_bdev_desc *desc;
288005632afdSAtul Malakar 	struct spdk_io_channel *ch;
288105632afdSAtul Malakar 	struct spdk_nvme_ns_data nsdata = {};
288205632afdSAtul Malakar 	struct spdk_iov_xfer ix;
288305632afdSAtul Malakar 	int rc;
288405632afdSAtul Malakar 
2885892c29f4SKonrad Sztyber 	nvmf_ctrlr_identify_ns(ctrlr, cmd, rsp, &nsdata, cmd->nsid);
288605632afdSAtul Malakar 
288705632afdSAtul Malakar 	rc = spdk_nvmf_request_get_bdev(cmd->nsid, req, &bdev, &desc, &ch);
288805632afdSAtul Malakar 	if (rc) {
288905632afdSAtul Malakar 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
289005632afdSAtul Malakar 	}
289105632afdSAtul Malakar 
289205632afdSAtul Malakar 	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
289305632afdSAtul Malakar 		spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
289405632afdSAtul Malakar 		spdk_iov_xfer_from_buf(&ix, &nsdata, sizeof(nsdata));
289505632afdSAtul Malakar 
289605632afdSAtul Malakar 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
289705632afdSAtul Malakar 	}
289805632afdSAtul Malakar 
2899f220d590SKonrad Sztyber 	assert(ns->passthru_nsid != 0);
29001a158640SKonrad Sztyber 	req->orig_nsid = ns->nsid;
2901f220d590SKonrad Sztyber 	cmd->nsid = ns->passthru_nsid;
29021a158640SKonrad Sztyber 
290305632afdSAtul Malakar 	return spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev, desc, ch, req, identify_ns_passthru_cb);
290405632afdSAtul Malakar }
290505632afdSAtul Malakar 
290605632afdSAtul Malakar static void
2907e93902a0SMichael Haeuptle nvmf_ctrlr_populate_oacs(struct spdk_nvmf_ctrlr *ctrlr,
2908e93902a0SMichael Haeuptle 			 struct spdk_nvme_ctrlr_data *cdata)
2909e93902a0SMichael Haeuptle {
29107d8bbf00SJohn Levon 	cdata->oacs = ctrlr->cdata.oacs;
29117d8bbf00SJohn Levon 
2912e93902a0SMichael Haeuptle 	cdata->oacs.virtualization_management =
2913e93902a0SMichael Haeuptle 		g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_VIRTUALIZATION_MANAGEMENT].hdlr != NULL;
2914e93902a0SMichael Haeuptle 	cdata->oacs.nvme_mi = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NVME_MI_SEND].hdlr != NULL
2915e93902a0SMichael Haeuptle 			      && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NVME_MI_RECEIVE].hdlr != NULL;
2916e93902a0SMichael Haeuptle 	cdata->oacs.directives = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_DIRECTIVE_SEND].hdlr != NULL
2917e93902a0SMichael Haeuptle 				 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_DIRECTIVE_RECEIVE].hdlr != NULL;
2918e93902a0SMichael Haeuptle 	cdata->oacs.device_self_test =
2919e93902a0SMichael Haeuptle 		g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_DEVICE_SELF_TEST].hdlr != NULL;
2920e93902a0SMichael Haeuptle 	cdata->oacs.ns_manage = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NS_MANAGEMENT].hdlr != NULL
2921e93902a0SMichael Haeuptle 				&& g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NS_ATTACHMENT].hdlr != NULL;
2922e93902a0SMichael Haeuptle 	cdata->oacs.firmware = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD].hdlr !=
2923e93902a0SMichael Haeuptle 			       NULL
2924e93902a0SMichael Haeuptle 			       && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_FIRMWARE_COMMIT].hdlr != NULL;
2925e93902a0SMichael Haeuptle 	cdata->oacs.format =
2926e93902a0SMichael Haeuptle 		g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_FORMAT_NVM].hdlr != NULL;
2927e93902a0SMichael Haeuptle 	cdata->oacs.security = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_SECURITY_SEND].hdlr != NULL
2928e93902a0SMichael Haeuptle 			       && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_SECURITY_RECEIVE].hdlr != NULL;
2929e93902a0SMichael Haeuptle 	cdata->oacs.get_lba_status = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_GET_LBA_STATUS].hdlr !=
2930e93902a0SMichael Haeuptle 				     NULL;
2931e93902a0SMichael Haeuptle }
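
/*
 * Sketch of how the OACS bits above get set (assuming the registration helper
 * declared in spdk/nvmf_cmd.h; my_security_send/my_security_recv are hypothetical
 * handlers of type spdk_nvmf_custom_cmd_hdlr):
 *
 *	spdk_nvmf_set_custom_admin_cmd_hdlr(SPDK_NVME_OPC_SECURITY_SEND, my_security_send);
 *	spdk_nvmf_set_custom_admin_cmd_hdlr(SPDK_NVME_OPC_SECURITY_RECEIVE, my_security_recv);
 *
 * Once both entries in g_nvmf_custom_admin_cmd_hdlrs are non-NULL,
 * nvmf_ctrlr_populate_oacs() reports cdata->oacs.security = 1 to hosts.
 */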
2932e93902a0SMichael Haeuptle 
2933d9afeb15SMichael Haeuptle int
2934bf6caa75SDaniel Verkamp spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_ctrlr_data *cdata)
2935bf6caa75SDaniel Verkamp {
2936bf6caa75SDaniel Verkamp 	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
2937478f6524SAlexey Marchuk 	struct spdk_nvmf_transport *transport;
2938bf6caa75SDaniel Verkamp 
2939bf6caa75SDaniel Verkamp 	/*
2940bf6caa75SDaniel Verkamp 	 * Common fields for discovery and NVM subsystems
2941bf6caa75SDaniel Verkamp 	 */
2942478f6524SAlexey Marchuk 	assert(ctrlr->admin_qpair);
2943478f6524SAlexey Marchuk 	transport = ctrlr->admin_qpair->transport;
2944bf6caa75SDaniel Verkamp 	spdk_strcpy_pad(cdata->fr, FW_VERSION, sizeof(cdata->fr), ' ');
29458e808490SJohn Barnard 	assert((transport->opts.max_io_size % 4096) == 0);
29468e808490SJohn Barnard 	cdata->mdts = spdk_u32log2(transport->opts.max_io_size / 4096);
2947bf6caa75SDaniel Verkamp 	cdata->cntlid = ctrlr->cntlid;
2948bf6caa75SDaniel Verkamp 	cdata->ver = ctrlr->vcprop.vs;
29496237da13SJacek Kalwas 	cdata->aerl = ctrlr->cdata.aerl;
2950bf6caa75SDaniel Verkamp 	cdata->lpa.edlp = 1;
2951bf6caa75SDaniel Verkamp 	cdata->elpe = 127;
29528e808490SJohn Barnard 	cdata->maxcmd = transport->opts.max_queue_depth;
2953000e6f5bSJacek Kalwas 	cdata->sgls = ctrlr->cdata.sgls;
2954cbfd581cSAlexis Lescouet 	cdata->fuses = ctrlr->cdata.fuses;
2955b2ee0bc1SJim Harris 	cdata->acwu = 0; /* ACWU is 0-based. */
29565e4e4bc4SBen Walker 	if (subsystem->flags.ana_reporting) {
2957332e846fSMonica Kenguva 		cdata->mnan = subsystem->max_nsid;
29586f226573SShuhei Matsumoto 	}
2959bf6caa75SDaniel Verkamp 	spdk_strcpy_pad(cdata->subnqn, subsystem->subnqn, sizeof(cdata->subnqn), '\0');
2960bf6caa75SDaniel Verkamp 
29612172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "ctrlr data: maxcmd 0x%x\n", cdata->maxcmd);
29622172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "sgls data: 0x%x\n", from_le32(&cdata->sgls));
2963bf6caa75SDaniel Verkamp 
2964fce94287SMadhu Adav MJ 
29657efdf905SSlawomir Ptak 	if (spdk_nvmf_subsystem_is_discovery(subsystem)) {
2966fce94287SMadhu Adav MJ 		/*
2967fce94287SMadhu Adav MJ 		 * NVM Discovery subsystem fields
2968fce94287SMadhu Adav MJ 		 */
2969fce94287SMadhu Adav MJ 		cdata->oaes.discovery_log_change_notices = 1;
29706ee9cd25Skyuho.son 		cdata->cntrltype = SPDK_NVME_CTRLR_DISCOVERY;
2971fce94287SMadhu Adav MJ 	} else {
2972b023e638SChangpeng Liu 		cdata->vid = ctrlr->cdata.vid;
2973b023e638SChangpeng Liu 		cdata->ssvid = ctrlr->cdata.ssvid;
2974b023e638SChangpeng Liu 		cdata->ieee[0] = ctrlr->cdata.ieee[0];
2975b023e638SChangpeng Liu 		cdata->ieee[1] = ctrlr->cdata.ieee[1];
2976b023e638SChangpeng Liu 		cdata->ieee[2] = ctrlr->cdata.ieee[2];
2977b023e638SChangpeng Liu 
2978bf6caa75SDaniel Verkamp 		/*
2979bf6caa75SDaniel Verkamp 		 * NVM subsystem fields (reserved for discovery subsystems)
2980bf6caa75SDaniel Verkamp 		 */
298114032a98SGregory Shapiro 		spdk_strcpy_pad(cdata->mn, spdk_nvmf_subsystem_get_mn(subsystem), sizeof(cdata->mn), ' ');
2982bf6caa75SDaniel Verkamp 		spdk_strcpy_pad(cdata->sn, spdk_nvmf_subsystem_get_sn(subsystem), sizeof(cdata->sn), ' ');
2983000e6f5bSJacek Kalwas 		cdata->kas = ctrlr->cdata.kas;
2984bf6caa75SDaniel Verkamp 
2985bf6caa75SDaniel Verkamp 		cdata->rab = 6;
29861ec2c81bSDaniel Verkamp 		cdata->cmic.multi_port = 1;
2987efbd101bSShuhei Matsumoto 		cdata->cmic.multi_ctrlr = 1;
29883165d0c0SDaniel Verkamp 		cdata->oaes.ns_attribute_notices = 1;
2989b4d406b7SAnkit Kumar 		cdata->ctratt.bits.host_id_exhid_supported = 1;
2990037d5165SAnkit Kumar 		cdata->ctratt.bits.fdps = ctrlr->subsys->fdp_supported;
29916ee9cd25Skyuho.son 		cdata->cntrltype = SPDK_NVME_CTRLR_IO;
29922a99dbc7SShuhei Matsumoto 		/* We do not have any actual limit on the number of concurrent Abort
29932a99dbc7SShuhei Matsumoto 		 * commands; we simply follow the NVMe specification's recommendation.
29942a99dbc7SShuhei Matsumoto 		 */
29952a99dbc7SShuhei Matsumoto 		cdata->acl = NVMF_ABORT_COMMAND_LIMIT;
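		/* ACL is a 0's based value, so 3 advertises support for four concurrent
		 * Abort commands, the minimum recommended by the NVMe specification.
		 */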
2996bf6caa75SDaniel Verkamp 		cdata->frmw.slot1_ro = 1;
2997bf6caa75SDaniel Verkamp 		cdata->frmw.num_slots = 1;
2998bf6caa75SDaniel Verkamp 
2999c5008b37SDaniel Verkamp 		cdata->lpa.celp = 1; /* Command Effects log page supported */
3000c5008b37SDaniel Verkamp 
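		/* Only 64-byte submission queue entries (2^6) and 16-byte completion
		 * queue entries (2^4) are used with NVMe-oF, so the minimum and
		 * maximum are identical.
		 */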
3001bf6caa75SDaniel Verkamp 		cdata->sqes.min = 6;
3002bf6caa75SDaniel Verkamp 		cdata->sqes.max = 6;
3003bf6caa75SDaniel Verkamp 		cdata->cqes.min = 4;
3004bf6caa75SDaniel Verkamp 		cdata->cqes.max = 4;
300514451d76SDaniel Verkamp 		cdata->nn = subsystem->max_nsid;
3006bf6caa75SDaniel Verkamp 		cdata->vwc.present = 1;
3007d4ef57c0SDaniel Verkamp 		cdata->vwc.flush_broadcast = SPDK_NVME_FLUSH_BROADCAST_NOT_SUPPORTED;
3008bf6caa75SDaniel Verkamp 
3009000e6f5bSJacek Kalwas 		cdata->nvmf_specific = ctrlr->cdata.nvmf_specific;
3010bf6caa75SDaniel Verkamp 
301116c65744SAlexis Lescouet 		cdata->oncs.compare = ctrlr->cdata.oncs.compare;
30127d19bf23SKonrad Sztyber 		cdata->oncs.dsm = ctrlr->cdata.oncs.dsm && nvmf_ctrlr_dsm_supported(ctrlr);
30137d19bf23SKonrad Sztyber 		cdata->oncs.write_zeroes = ctrlr->cdata.oncs.write_zeroes &&
30147d19bf23SKonrad Sztyber 					   nvmf_ctrlr_write_zeroes_supported(ctrlr);
301508f25fb9SChangpeng Liu 		cdata->oncs.reservations = ctrlr->cdata.oncs.reservations;
301686136540SRui Chang 		cdata->oncs.copy = ctrlr->cdata.oncs.copy;
30178305e49bSEvgeniy Kochetov 		cdata->ocfs.copy_format0 = cdata->oncs.copy;
30185e4e4bc4SBen Walker 		if (subsystem->flags.ana_reporting) {
30197a9d089bSJon Kohler 			/* Asymmetric Namespace Access Reporting is supported. */
30207a9d089bSJon Kohler 			cdata->cmic.ana_reporting = 1;
30217a9d089bSJon Kohler 			cdata->oaes.ana_change_notices = 1;
30227a9d089bSJon Kohler 
30236206e468SShuhei Matsumoto 			cdata->anatt = ANA_TRANSITION_TIME_IN_SEC;
3024523f2a85SShuhei Matsumoto 			/* ANA Change state is not used, and ANA Persistent Loss state
3025523f2a85SShuhei Matsumoto 			 * is not supported for now.
3026523f2a85SShuhei Matsumoto 			 */
3027332e846fSMonica Kenguva 			cdata->anacap.ana_optimized_state = 1;
3028523f2a85SShuhei Matsumoto 			cdata->anacap.ana_non_optimized_state = 1;
3029523f2a85SShuhei Matsumoto 			cdata->anacap.ana_inaccessible_state = 1;
3030332e846fSMonica Kenguva 			/* ANAGRPID does not change while a namespace is attached to the controller */
3031332e846fSMonica Kenguva 			cdata->anacap.no_change_anagrpid = 1;
3032332e846fSMonica Kenguva 			cdata->anagrpmax = subsystem->max_nsid;
3033332e846fSMonica Kenguva 			cdata->nanagrpid = subsystem->max_nsid;
30346f226573SShuhei Matsumoto 		}
3035bf6caa75SDaniel Verkamp 
3036e93902a0SMichael Haeuptle 		nvmf_ctrlr_populate_oacs(ctrlr, cdata);
3037e93902a0SMichael Haeuptle 
3038b832f99fSyupeng 		assert(subsystem->tgt != NULL);
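		/* Command Retry Delay Times, in units of 100 milliseconds, taken from
		 * the target-wide configuration.
		 */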
3039b832f99fSyupeng 		cdata->crdt[0] = subsystem->tgt->crdt[0];
3040b832f99fSyupeng 		cdata->crdt[1] = subsystem->tgt->crdt[1];
3041b832f99fSyupeng 		cdata->crdt[2] = subsystem->tgt->crdt[2];
30425cc56599Syupeng 
30432172c432STomasz Zawadzki 		SPDK_DEBUGLOG(nvmf, "ext ctrlr data: ioccsz 0x%x\n",
3044bf6caa75SDaniel Verkamp 			      cdata->nvmf_specific.ioccsz);
30452172c432STomasz Zawadzki 		SPDK_DEBUGLOG(nvmf, "ext ctrlr data: iorcsz 0x%x\n",
3046bf6caa75SDaniel Verkamp 			      cdata->nvmf_specific.iorcsz);
30472172c432STomasz Zawadzki 		SPDK_DEBUGLOG(nvmf, "ext ctrlr data: icdoff 0x%x\n",
3048bf6caa75SDaniel Verkamp 			      cdata->nvmf_specific.icdoff);
30492172c432STomasz Zawadzki 		SPDK_DEBUGLOG(nvmf, "ext ctrlr data: ctrattr 0x%x\n",
3050bf6caa75SDaniel Verkamp 			      *(uint8_t *)&cdata->nvmf_specific.ctrattr);
30512172c432STomasz Zawadzki 		SPDK_DEBUGLOG(nvmf, "ext ctrlr data: msdbd 0x%x\n",
3052bf6caa75SDaniel Verkamp 			      cdata->nvmf_specific.msdbd);
3053bf6caa75SDaniel Verkamp 	}
3054bf6caa75SDaniel Verkamp 
3055bf6caa75SDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3056bf6caa75SDaniel Verkamp }
3057bf6caa75SDaniel Verkamp 
3058bf6caa75SDaniel Verkamp static int
3059a36785dfSDennis Maisenbacher nvmf_ns_identify_iocs_zns(struct spdk_nvmf_ns *ns,
3060a36785dfSDennis Maisenbacher 			  struct spdk_nvme_cmd *cmd,
3061a36785dfSDennis Maisenbacher 			  struct spdk_nvme_cpl *rsp,
3062a36785dfSDennis Maisenbacher 			  struct spdk_nvme_zns_ns_data *nsdata_zns)
3063a36785dfSDennis Maisenbacher {
3064a36785dfSDennis Maisenbacher 	nsdata_zns->zoc.variable_zone_capacity = 0;
3065a36785dfSDennis Maisenbacher 	nsdata_zns->zoc.zone_active_excursions = 0;
3066a36785dfSDennis Maisenbacher 	nsdata_zns->ozcs.read_across_zone_boundaries = 1;
3067a36785dfSDennis Maisenbacher 	/* mar and mor are 0's based; when a bdev helper reports 0 (no limit), the
3068a36785dfSDennis Maisenbacher 	 * subtraction below underflows to the correct value of FFFFFFFFh (no limit). */
3069a36785dfSDennis Maisenbacher 	nsdata_zns->mar = spdk_bdev_get_max_active_zones(ns->bdev) - 1;
3070a36785dfSDennis Maisenbacher 	nsdata_zns->mor = spdk_bdev_get_max_open_zones(ns->bdev) - 1;
3071a36785dfSDennis Maisenbacher 	nsdata_zns->rrl = 0;
3072a36785dfSDennis Maisenbacher 	nsdata_zns->frl = 0;
3073a36785dfSDennis Maisenbacher 	nsdata_zns->lbafe[0].zsze = spdk_bdev_get_zone_size(ns->bdev);
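	/* zsze is expressed in logical blocks; spdk_bdev_get_zone_size() already
	 * reports the zone size in blocks, so no unit conversion is needed.
	 */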
3074a36785dfSDennis Maisenbacher 
3075a36785dfSDennis Maisenbacher 	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
3076a36785dfSDennis Maisenbacher 	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
3077a36785dfSDennis Maisenbacher 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3078a36785dfSDennis Maisenbacher }
3079a36785dfSDennis Maisenbacher 
30805e4d957eSShuhei Matsumoto static int
30815e4d957eSShuhei Matsumoto nvmf_ns_identify_iocs_nvm(struct spdk_nvmf_ns *ns,
30825e4d957eSShuhei Matsumoto 			  struct spdk_nvme_cpl *rsp,
30835e4d957eSShuhei Matsumoto 			  struct spdk_nvme_nvm_ns_data *nsdata_nvm)
30845e4d957eSShuhei Matsumoto {
30855e4d957eSShuhei Matsumoto 	nvmf_bdev_ctrlr_identify_iocs_nvm(ns, nsdata_nvm);
30865e4d957eSShuhei Matsumoto 
30875e4d957eSShuhei Matsumoto 	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
30885e4d957eSShuhei Matsumoto 	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
30895e4d957eSShuhei Matsumoto 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
30905e4d957eSShuhei Matsumoto }
30915e4d957eSShuhei Matsumoto 
3092a36785dfSDennis Maisenbacher int
3093a36785dfSDennis Maisenbacher spdk_nvmf_ns_identify_iocs_specific(struct spdk_nvmf_ctrlr *ctrlr,
3094a36785dfSDennis Maisenbacher 				    struct spdk_nvme_cmd *cmd,
3095a36785dfSDennis Maisenbacher 				    struct spdk_nvme_cpl *rsp,
3096a36785dfSDennis Maisenbacher 				    void *nsdata,
3097a36785dfSDennis Maisenbacher 				    size_t nsdata_size)
3098a36785dfSDennis Maisenbacher {
3099a36785dfSDennis Maisenbacher 	uint8_t csi = cmd->cdw11_bits.identify.csi;
310005859ef1SJim Harris 	struct spdk_nvmf_ns *ns = _nvmf_ctrlr_get_ns_safe(ctrlr, cmd->nsid, rsp);
3101a36785dfSDennis Maisenbacher 
3102a36785dfSDennis Maisenbacher 	memset(nsdata, 0, nsdata_size);
3103a36785dfSDennis Maisenbacher 
3104a36785dfSDennis Maisenbacher 	if (ns == NULL) {
3105a36785dfSDennis Maisenbacher 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
3106a36785dfSDennis Maisenbacher 		rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
3107a36785dfSDennis Maisenbacher 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3108a36785dfSDennis Maisenbacher 	}
3109a36785dfSDennis Maisenbacher 
3110a36785dfSDennis Maisenbacher 	switch (csi) {
3111a36785dfSDennis Maisenbacher 	case SPDK_NVME_CSI_ZNS:
3112a36785dfSDennis Maisenbacher 		return nvmf_ns_identify_iocs_zns(ns, cmd, rsp, nsdata);
31135e4d957eSShuhei Matsumoto 	case SPDK_NVME_CSI_NVM:
31145e4d957eSShuhei Matsumoto 		if (!ctrlr->dif_insert_or_strip) {
31155e4d957eSShuhei Matsumoto 			return nvmf_ns_identify_iocs_nvm(ns, rsp, nsdata);
31165e4d957eSShuhei Matsumoto 		}
31175e4d957eSShuhei Matsumoto 		break;
3118a36785dfSDennis Maisenbacher 	default:
3119a36785dfSDennis Maisenbacher 		break;
3120a36785dfSDennis Maisenbacher 	}
3121a36785dfSDennis Maisenbacher 
3122a36785dfSDennis Maisenbacher 	SPDK_DEBUGLOG(nvmf,
3123a36785dfSDennis Maisenbacher 		      "Returning zero filled struct for the iocs specific ns "
3124a36785dfSDennis Maisenbacher 		      "identify command and CSI 0x%02x\n",
3125a36785dfSDennis Maisenbacher 		      csi);
3126a36785dfSDennis Maisenbacher 	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
3127a36785dfSDennis Maisenbacher 	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
3128a36785dfSDennis Maisenbacher 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3129a36785dfSDennis Maisenbacher }
3130a36785dfSDennis Maisenbacher 
3131a36785dfSDennis Maisenbacher static int
3132c7feb85dSHaoqian He nvmf_ctrlr_identify_iocs_nvm(struct spdk_nvmf_ctrlr *ctrlr,
3133c7feb85dSHaoqian He 			     struct spdk_nvme_cmd *cmd,
3134c7feb85dSHaoqian He 			     struct spdk_nvme_cpl *rsp,
3135c7feb85dSHaoqian He 			     struct spdk_nvme_nvm_ctrlr_data *cdata_nvm)
3136c7feb85dSHaoqian He {
3137c7feb85dSHaoqian He 	/* The unit of max_write_zeroes_size_kib is KiB.
3138c7feb85dSHaoqian He 	 * The unit of wzsl is the minimum memory page size (2 ^ (12 + CAP.MPSMIN) bytes)
3139c7feb85dSHaoqian He 	 * and is reported as a power of two (2^n).
3140c7feb85dSHaoqian He 	 */
3141c7feb85dSHaoqian He 	cdata_nvm->wzsl = spdk_u64log2(ctrlr->subsys->max_write_zeroes_size_kib >>
3142c7feb85dSHaoqian He 				       (2 + ctrlr->vcprop.cap.bits.mpsmin));
3143c7feb85dSHaoqian He 
3144c7feb85dSHaoqian He 	/* The unit of max_discard_size_kib is KiB.
3145c7feb85dSHaoqian He 	 * dmrsl indicates the maximum number of logical blocks in a single range
3146c7feb85dSHaoqian He 	 * of a Dataset Management command.
3147c7feb85dSHaoqian He 	 */
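	/* The shift below assumes 512-byte logical blocks: 1 KiB equals two blocks. */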
3148c7feb85dSHaoqian He 	cdata_nvm->dmrsl = ctrlr->subsys->max_discard_size_kib << 1;
3149c7feb85dSHaoqian He 	cdata_nvm->dmrl = 1;
3150c7feb85dSHaoqian He 
3151c7feb85dSHaoqian He 	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
3152c7feb85dSHaoqian He 	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
3153c7feb85dSHaoqian He 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3154c7feb85dSHaoqian He }
3155c7feb85dSHaoqian He 
3156c7feb85dSHaoqian He static int
3157a36785dfSDennis Maisenbacher nvmf_ctrlr_identify_iocs_zns(struct spdk_nvmf_ctrlr *ctrlr,
3158a36785dfSDennis Maisenbacher 			     struct spdk_nvme_cmd *cmd,
3159a36785dfSDennis Maisenbacher 			     struct spdk_nvme_cpl *rsp,
3160a36785dfSDennis Maisenbacher 			     struct spdk_nvme_zns_ctrlr_data *cdata_zns)
3161a36785dfSDennis Maisenbacher {
3162a36785dfSDennis Maisenbacher 	/* The unit of max_zone_append_size_kib is KiB.
3163a36785dfSDennis Maisenbacher 	 * The unit of zasl is the minimum memory page size (2 ^ (12 + CAP.MPSMIN) KiB)
3164a36785dfSDennis Maisenbacher 	 * and is reported as a power of two (2^n).
3165a36785dfSDennis Maisenbacher 	 */
3166a36785dfSDennis Maisenbacher 	cdata_zns->zasl = spdk_u64log2(ctrlr->subsys->max_zone_append_size_kib >>
3167a36785dfSDennis Maisenbacher 				       (12 + ctrlr->vcprop.cap.bits.mpsmin));
3168a36785dfSDennis Maisenbacher 
3169a36785dfSDennis Maisenbacher 	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
3170a36785dfSDennis Maisenbacher 	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
3171a36785dfSDennis Maisenbacher 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3172a36785dfSDennis Maisenbacher }
3173a36785dfSDennis Maisenbacher 
3174a36785dfSDennis Maisenbacher int
3175a36785dfSDennis Maisenbacher spdk_nvmf_ctrlr_identify_iocs_specific(struct spdk_nvmf_ctrlr *ctrlr,
3176a36785dfSDennis Maisenbacher 				       struct spdk_nvme_cmd *cmd,
3177a36785dfSDennis Maisenbacher 				       struct spdk_nvme_cpl *rsp,
3178a36785dfSDennis Maisenbacher 				       void *cdata,
3179a36785dfSDennis Maisenbacher 				       size_t cdata_size)
3180a36785dfSDennis Maisenbacher {
3181a36785dfSDennis Maisenbacher 	uint8_t csi = cmd->cdw11_bits.identify.csi;
3182a36785dfSDennis Maisenbacher 
3183a36785dfSDennis Maisenbacher 	memset(cdata, 0, cdata_size);
3184a36785dfSDennis Maisenbacher 
3185a36785dfSDennis Maisenbacher 	switch (csi) {
3186c7feb85dSHaoqian He 	case SPDK_NVME_CSI_NVM:
3187c7feb85dSHaoqian He 		return nvmf_ctrlr_identify_iocs_nvm(ctrlr, cmd, rsp, cdata);
3188a36785dfSDennis Maisenbacher 	case SPDK_NVME_CSI_ZNS:
3189a36785dfSDennis Maisenbacher 		return nvmf_ctrlr_identify_iocs_zns(ctrlr, cmd, rsp, cdata);
3190a36785dfSDennis Maisenbacher 	default:
3191a36785dfSDennis Maisenbacher 		break;
3192a36785dfSDennis Maisenbacher 	}
3193a36785dfSDennis Maisenbacher 
3194a36785dfSDennis Maisenbacher 	SPDK_DEBUGLOG(nvmf,
3195a36785dfSDennis Maisenbacher 		      "Returning zero filled struct for the iocs specific ctrlr "
3196a36785dfSDennis Maisenbacher 		      "identify command and CSI 0x%02x\n",
3197a36785dfSDennis Maisenbacher 		      csi);
3198a36785dfSDennis Maisenbacher 	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
3199a36785dfSDennis Maisenbacher 	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
3200a36785dfSDennis Maisenbacher 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3201a36785dfSDennis Maisenbacher }
3202a36785dfSDennis Maisenbacher 
3203a36785dfSDennis Maisenbacher static int
320405859ef1SJim Harris nvmf_ctrlr_identify_active_ns_list(struct spdk_nvmf_ctrlr *ctrlr,
3205bf6caa75SDaniel Verkamp 				   struct spdk_nvme_cmd *cmd,
3206bf6caa75SDaniel Verkamp 				   struct spdk_nvme_cpl *rsp,
3207bf6caa75SDaniel Verkamp 				   struct spdk_nvme_ns_list *ns_list)
3208bf6caa75SDaniel Verkamp {
320905859ef1SJim Harris 	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
321014451d76SDaniel Verkamp 	struct spdk_nvmf_ns *ns;
321114451d76SDaniel Verkamp 	uint32_t count = 0;
3212bf6caa75SDaniel Verkamp 
3213bf6caa75SDaniel Verkamp 	if (cmd->nsid >= 0xfffffffeUL) {
3214bf6caa75SDaniel Verkamp 		SPDK_ERRLOG("Identify Active Namespace List with invalid NSID %u\n", cmd->nsid);
3215bf6caa75SDaniel Verkamp 		rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
3216bf6caa75SDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3217bf6caa75SDaniel Verkamp 	}
3218bf6caa75SDaniel Verkamp 
321924a8c04aSJim Harris 	memset(ns_list, 0, sizeof(*ns_list));
322024a8c04aSJim Harris 
322114451d76SDaniel Verkamp 	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
322214451d76SDaniel Verkamp 	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
3223d37555b4SJonas Pfefferle 		if (ns->opts.nsid <= cmd->nsid || !nvmf_ctrlr_ns_is_visible(ctrlr, ns->opts.nsid)) {
322414451d76SDaniel Verkamp 			continue;
322514451d76SDaniel Verkamp 		}
3226bf6caa75SDaniel Verkamp 
3227250d342bSDaniel Verkamp 		ns_list->ns_list[count++] = ns->opts.nsid;
3228bf6caa75SDaniel Verkamp 		if (count == SPDK_COUNTOF(ns_list->ns_list)) {
3229bf6caa75SDaniel Verkamp 			break;
3230bf6caa75SDaniel Verkamp 		}
3231bf6caa75SDaniel Verkamp 	}
3232bf6caa75SDaniel Verkamp 
3233bf6caa75SDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3234bf6caa75SDaniel Verkamp }
3235bf6caa75SDaniel Verkamp 
32365c2952abSDaniel Verkamp static void
32375c2952abSDaniel Verkamp _add_ns_id_desc(void **buf_ptr, size_t *buf_remain,
32385c2952abSDaniel Verkamp 		enum spdk_nvme_nidt type,
32395c2952abSDaniel Verkamp 		const void *data, size_t data_size)
32405c2952abSDaniel Verkamp {
32415c2952abSDaniel Verkamp 	struct spdk_nvme_ns_id_desc *desc;
32425c2952abSDaniel Verkamp 	size_t desc_size = sizeof(*desc) + data_size;
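	/* Each descriptor is a 4-byte header (NIDT, NIDL and two reserved bytes)
	 * followed by NIDL bytes of identifier data.
	 */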
32435c2952abSDaniel Verkamp 
32445c2952abSDaniel Verkamp 	/*
32455c2952abSDaniel Verkamp 	 * These should never fail in practice, since all valid NS ID descriptors
32465c2952abSDaniel Verkamp 	 * should be defined so that they fit in the available 4096-byte buffer.
32475c2952abSDaniel Verkamp 	 */
32485c2952abSDaniel Verkamp 	assert(data_size > 0);
32495c2952abSDaniel Verkamp 	assert(data_size <= UINT8_MAX);
32505c2952abSDaniel Verkamp 	assert(desc_size < *buf_remain);
32515c2952abSDaniel Verkamp 	if (data_size == 0 || data_size > UINT8_MAX || desc_size > *buf_remain) {
32525c2952abSDaniel Verkamp 		return;
32535c2952abSDaniel Verkamp 	}
32545c2952abSDaniel Verkamp 
32555c2952abSDaniel Verkamp 	desc = *buf_ptr;
32565c2952abSDaniel Verkamp 	desc->nidt = type;
32575c2952abSDaniel Verkamp 	desc->nidl = data_size;
32585c2952abSDaniel Verkamp 	memcpy(desc->nid, data, data_size);
32595c2952abSDaniel Verkamp 
32605c2952abSDaniel Verkamp 	*buf_ptr += desc_size;
32615c2952abSDaniel Verkamp 	*buf_remain -= desc_size;
32625c2952abSDaniel Verkamp }
32635c2952abSDaniel Verkamp 
32645c2952abSDaniel Verkamp static int
3265198fd2ceSSeth Howell nvmf_ctrlr_identify_ns_id_descriptor_list(
326605859ef1SJim Harris 	struct spdk_nvmf_ctrlr *ctrlr,
32675c2952abSDaniel Verkamp 	struct spdk_nvme_cmd *cmd,
32685c2952abSDaniel Verkamp 	struct spdk_nvme_cpl *rsp,
32695c2952abSDaniel Verkamp 	void *id_desc_list, size_t id_desc_list_size)
32705c2952abSDaniel Verkamp {
32715c2952abSDaniel Verkamp 	struct spdk_nvmf_ns *ns;
32725c2952abSDaniel Verkamp 	size_t buf_remain = id_desc_list_size;
32735c2952abSDaniel Verkamp 	void *buf_ptr = id_desc_list;
32745c2952abSDaniel Verkamp 
327505859ef1SJim Harris 	ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid);
32765c2952abSDaniel Verkamp 	if (ns == NULL || ns->bdev == NULL) {
32775c2952abSDaniel Verkamp 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
32785c2952abSDaniel Verkamp 		rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
32795c2952abSDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
32805c2952abSDaniel Verkamp 	}
32815c2952abSDaniel Verkamp 
3282a05f88ffSDaniel Verkamp #define ADD_ID_DESC(type, data, size) \
32835c2952abSDaniel Verkamp 	do { \
3284a05f88ffSDaniel Verkamp 		if (!spdk_mem_all_zero(data, size)) { \
3285a05f88ffSDaniel Verkamp 			_add_ns_id_desc(&buf_ptr, &buf_remain, type, data, size); \
32865c2952abSDaniel Verkamp 		} \
32875c2952abSDaniel Verkamp 	} while (0)
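	/* Descriptors whose identifier is all zeroes are skipped; an all-zero
	 * EUI64, NGUID or UUID means that identifier was not configured for this
	 * namespace.
	 */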
32885c2952abSDaniel Verkamp 
3289a05f88ffSDaniel Verkamp 	ADD_ID_DESC(SPDK_NVME_NIDT_EUI64, ns->opts.eui64, sizeof(ns->opts.eui64));
3290a05f88ffSDaniel Verkamp 	ADD_ID_DESC(SPDK_NVME_NIDT_NGUID, ns->opts.nguid, sizeof(ns->opts.nguid));
3291a05f88ffSDaniel Verkamp 	ADD_ID_DESC(SPDK_NVME_NIDT_UUID, &ns->opts.uuid, sizeof(ns->opts.uuid));
3292a36785dfSDennis Maisenbacher 	ADD_ID_DESC(SPDK_NVME_NIDT_CSI, &ns->csi, sizeof(uint8_t));
32935c2952abSDaniel Verkamp 
32945c2952abSDaniel Verkamp 	/*
3295acc4d176SJohn Levon 	 * The list is automatically 0-terminated, both in the temporary buffer
3296acc4d176SJohn Levon 	 * used by nvmf_ctrlr_identify() and in the eventual iov destination:
3297acc4d176SJohn Levon 	 * controller-to-host buffers in admin commands are always zeroed in
3298acc4d176SJohn Levon 	 * nvmf_ctrlr_process_admin_cmd().
32995c2952abSDaniel Verkamp 	 */
33005c2952abSDaniel Verkamp 
33015c2952abSDaniel Verkamp #undef ADD_ID_DESC
33025c2952abSDaniel Verkamp 
33035c2952abSDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
33045c2952abSDaniel Verkamp }
33055c2952abSDaniel Verkamp 
33065b4b66baSDaniel Verkamp static int
33075fbe6ef9SJim Harris nvmf_ctrlr_identify_iocs(struct spdk_nvmf_ctrlr *ctrlr,
33085fbe6ef9SJim Harris 			 struct spdk_nvme_cmd *cmd,
33095fbe6ef9SJim Harris 			 struct spdk_nvme_cpl *rsp,
33105fbe6ef9SJim Harris 			 void *cdata, size_t cdata_size)
33115fbe6ef9SJim Harris {
33125fbe6ef9SJim Harris 	struct spdk_nvme_iocs_vector *vector;
33135fbe6ef9SJim Harris 	struct spdk_nvmf_ns *ns;
33145fbe6ef9SJim Harris 
33155fbe6ef9SJim Harris 	if (cdata_size < sizeof(struct spdk_nvme_iocs_vector)) {
33165fbe6ef9SJim Harris 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
33175fbe6ef9SJim Harris 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
33185fbe6ef9SJim Harris 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
33195fbe6ef9SJim Harris 	}
33205fbe6ef9SJim Harris 
33215fbe6ef9SJim Harris 	/* For now we only support this command when it targets the current
33225fbe6ef9SJim Harris 	 * controller.
33235fbe6ef9SJim Harris 	 */
33245fbe6ef9SJim Harris 	if (cmd->cdw10_bits.identify.cntid != 0xFFFF &&
33255fbe6ef9SJim Harris 	    cmd->cdw10_bits.identify.cntid != ctrlr->cntlid) {
33265fbe6ef9SJim Harris 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
33275fbe6ef9SJim Harris 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
33285fbe6ef9SJim Harris 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
33295fbe6ef9SJim Harris 	}
33305fbe6ef9SJim Harris 	memset(cdata, 0, cdata_size);
33315fbe6ef9SJim Harris 
33325fbe6ef9SJim Harris 	vector = cdata;
33335fbe6ef9SJim Harris 	vector->nvm = 1;
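	/* The NVM command set is always advertised; the ZNS bit is set below only
	 * if at least one namespace in the subsystem is backed by a zoned bdev.
	 */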
33345fbe6ef9SJim Harris 	for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns != NULL;
33355fbe6ef9SJim Harris 	     ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) {
33365fbe6ef9SJim Harris 		if (ns->bdev == NULL) {
33375fbe6ef9SJim Harris 			continue;
33385fbe6ef9SJim Harris 		}
33395fbe6ef9SJim Harris 		if (spdk_bdev_is_zoned(ns->bdev)) {
33405fbe6ef9SJim Harris 			vector->zns = 1;
33415fbe6ef9SJim Harris 		}
33425fbe6ef9SJim Harris 	}
33435fbe6ef9SJim Harris 
33445fbe6ef9SJim Harris 	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
33455fbe6ef9SJim Harris 	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
33465fbe6ef9SJim Harris 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
33475fbe6ef9SJim Harris }
33485fbe6ef9SJim Harris 
33495fbe6ef9SJim Harris static int
3350198fd2ceSSeth Howell nvmf_ctrlr_identify(struct spdk_nvmf_request *req)
3351bf6caa75SDaniel Verkamp {
3352bf6caa75SDaniel Verkamp 	uint8_t cns;
3353bf6caa75SDaniel Verkamp 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
3354bf6caa75SDaniel Verkamp 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
3355bf6caa75SDaniel Verkamp 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
3356bf6caa75SDaniel Verkamp 	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
3357acc4d176SJohn Levon 	int ret = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3358acc4d176SJohn Levon 	char tmpbuf[SPDK_NVME_IDENTIFY_BUFLEN] = "";
3359ecc80dfcSJohn Levon 	struct spdk_iov_xfer ix;
3360bf6caa75SDaniel Verkamp 
3361ecc80dfcSJohn Levon 	if (req->iovcnt < 1 || req->length < SPDK_NVME_IDENTIFY_BUFLEN) {
33629cb78e1aSJim Harris 		SPDK_DEBUGLOG(nvmf, "identify command with invalid buffer\n");
3363bf6caa75SDaniel Verkamp 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
3364bf6caa75SDaniel Verkamp 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
3365acc4d176SJohn Levon 		return ret;
3366bf6caa75SDaniel Verkamp 	}
3367bf6caa75SDaniel Verkamp 
33681fea1fccSChangpeng Liu 	cns = cmd->cdw10_bits.identify.cns;
3369bf6caa75SDaniel Verkamp 
33707efdf905SSlawomir Ptak 	if (spdk_nvmf_subsystem_is_discovery(subsystem) &&
3371bf6caa75SDaniel Verkamp 	    cns != SPDK_NVME_IDENTIFY_CTRLR) {
3372bf6caa75SDaniel Verkamp 		/* Discovery controllers only support Identify Controller */
3373bf6caa75SDaniel Verkamp 		goto invalid_cns;
3374bf6caa75SDaniel Verkamp 	}
3375bf6caa75SDaniel Verkamp 
3376acc4d176SJohn Levon 	/*
3377acc4d176SJohn Levon 	 * We must use a temporary buffer: it's entirely possible the out buffer
3378acc4d176SJohn Levon 	 * is split across more than one IOV.
3379acc4d176SJohn Levon 	 */
3380ecc80dfcSJohn Levon 	spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
3381acc4d176SJohn Levon 
3382a36785dfSDennis Maisenbacher 	SPDK_DEBUGLOG(nvmf, "Received identify command with CNS 0x%02x\n", cns);
3383a36785dfSDennis Maisenbacher 
3384bf6caa75SDaniel Verkamp 	switch (cns) {
3385bf6caa75SDaniel Verkamp 	case SPDK_NVME_IDENTIFY_NS:
338605632afdSAtul Malakar 		/* The function below may complete asynchronously and always needs the data
338705632afdSAtul Malakar 		 * in the request's buffer, so just return its result here. */
338805632afdSAtul Malakar 		return spdk_nvmf_ctrlr_identify_ns_ext(req);
3389bf6caa75SDaniel Verkamp 	case SPDK_NVME_IDENTIFY_CTRLR:
3390acc4d176SJohn Levon 		ret = spdk_nvmf_ctrlr_identify_ctrlr(ctrlr, (void *)&tmpbuf);
3391acc4d176SJohn Levon 		break;
3392bf6caa75SDaniel Verkamp 	case SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST:
339305859ef1SJim Harris 		ret = nvmf_ctrlr_identify_active_ns_list(ctrlr, cmd, rsp, (void *)&tmpbuf);
3394acc4d176SJohn Levon 		break;
33955c2952abSDaniel Verkamp 	case SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST:
339605859ef1SJim Harris 		ret = nvmf_ctrlr_identify_ns_id_descriptor_list(ctrlr, cmd, rsp,
3397acc4d176SJohn Levon 				tmpbuf, req->length);
3398acc4d176SJohn Levon 		break;
3399a36785dfSDennis Maisenbacher 	case SPDK_NVME_IDENTIFY_NS_IOCS:
3400a36785dfSDennis Maisenbacher 		ret = spdk_nvmf_ns_identify_iocs_specific(ctrlr, cmd, rsp, (void *)&tmpbuf, req->length);
3401a36785dfSDennis Maisenbacher 		break;
3402a36785dfSDennis Maisenbacher 	case SPDK_NVME_IDENTIFY_CTRLR_IOCS:
3403a36785dfSDennis Maisenbacher 		ret = spdk_nvmf_ctrlr_identify_iocs_specific(ctrlr, cmd, rsp, (void *)&tmpbuf, req->length);
3404a36785dfSDennis Maisenbacher 		break;
34055fbe6ef9SJim Harris 	case SPDK_NVME_IDENTIFY_IOCS:
34065fbe6ef9SJim Harris 		ret = nvmf_ctrlr_identify_iocs(ctrlr, cmd, rsp, (void *)&tmpbuf, req->length);
34075fbe6ef9SJim Harris 		break;
3408bf6caa75SDaniel Verkamp 	default:
3409bf6caa75SDaniel Verkamp 		goto invalid_cns;
3410bf6caa75SDaniel Verkamp 	}
3411bf6caa75SDaniel Verkamp 
3412acc4d176SJohn Levon 	if (ret == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
3413ecc80dfcSJohn Levon 		spdk_iov_xfer_from_buf(&ix, tmpbuf, sizeof(tmpbuf));
3414acc4d176SJohn Levon 	}
3415acc4d176SJohn Levon 
3416acc4d176SJohn Levon 	return ret;
3417acc4d176SJohn Levon 
3418bf6caa75SDaniel Verkamp invalid_cns:
3419478c0fa8SJohn Levon 	SPDK_DEBUGLOG(nvmf, "Identify command with unsupported CNS 0x%02x\n", cns);
3420bf6caa75SDaniel Verkamp 	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
3421bf6caa75SDaniel Verkamp 	rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
3422acc4d176SJohn Levon 	return ret;
3423bf6caa75SDaniel Verkamp }
34245b4b66baSDaniel Verkamp 
3425315d47daSShuhei Matsumoto static bool
3426315d47daSShuhei Matsumoto nvmf_qpair_abort_aer(struct spdk_nvmf_qpair *qpair, uint16_t cid)
34277d87273fSBen Walker {
34287d87273fSBen Walker 	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
3429315d47daSShuhei Matsumoto 	struct spdk_nvmf_request *req;
3430b7cc4dd7SJin Yu 	int i;
34317d87273fSBen Walker 
3432315d47daSShuhei Matsumoto 	if (!nvmf_qpair_is_admin_queue(qpair)) {
3433315d47daSShuhei Matsumoto 		return false;
3434315d47daSShuhei Matsumoto 	}
3435315d47daSShuhei Matsumoto 
3436fcc426bdSJacek Kalwas 	assert(spdk_get_thread() == ctrlr->thread);
3437fcc426bdSJacek Kalwas 
3438b7cc4dd7SJin Yu 	for (i = 0; i < ctrlr->nr_aer_reqs; i++) {
3439b7cc4dd7SJin Yu 		if (ctrlr->aer_req[i]->cmd->nvme_cmd.cid == cid) {
34402172c432STomasz Zawadzki 			SPDK_DEBUGLOG(nvmf, "Aborting AER request\n");
3441b7cc4dd7SJin Yu 			req = ctrlr->aer_req[i];
3442b7cc4dd7SJin Yu 			ctrlr->aer_req[i] = NULL;
3443b7cc4dd7SJin Yu 			ctrlr->nr_aer_reqs--;
3444b7cc4dd7SJin Yu 
3445315d47daSShuhei Matsumoto 			/* Move the last request into the slot being aborted so that
3446315d47daSShuhei Matsumoto 			 * aer_req stays contiguous.
3447315d47daSShuhei Matsumoto 			 */
3448b7cc4dd7SJin Yu 			if (i < ctrlr->nr_aer_reqs) {
3449b7cc4dd7SJin Yu 				ctrlr->aer_req[i] = ctrlr->aer_req[ctrlr->nr_aer_reqs];
3450b7cc4dd7SJin Yu 				ctrlr->aer_req[ctrlr->nr_aer_reqs] = NULL;
34517d87273fSBen Walker 			}
3452315d47daSShuhei Matsumoto 
3453315d47daSShuhei Matsumoto 			req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3454315d47daSShuhei Matsumoto 			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
3455315d47daSShuhei Matsumoto 			_nvmf_request_complete(req);
3456315d47daSShuhei Matsumoto 			return true;
3457315d47daSShuhei Matsumoto 		}
34587d87273fSBen Walker 	}
34597d87273fSBen Walker 
3460315d47daSShuhei Matsumoto 	return false;
34617d87273fSBen Walker }
34627d87273fSBen Walker 
3463a50a70ecSKonrad Sztyber void
3464a50a70ecSKonrad Sztyber nvmf_qpair_abort_pending_zcopy_reqs(struct spdk_nvmf_qpair *qpair)
3465a50a70ecSKonrad Sztyber {
3466a50a70ecSKonrad Sztyber 	struct spdk_nvmf_request *req, *tmp;
3467a50a70ecSKonrad Sztyber 
3468a50a70ecSKonrad Sztyber 	TAILQ_FOREACH_SAFE(req, &qpair->outstanding, link, tmp) {
3469a50a70ecSKonrad Sztyber 		if (req->zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE) {
3470a50a70ecSKonrad Sztyber 			/* Zero-copy requests are kept on the outstanding queue from the moment
3471a50a70ecSKonrad Sztyber 			 * zcopy_start is sent until a zcopy_end callback is received.  Therefore,
3472a50a70ecSKonrad Sztyber 			 * we can't remove them from the outstanding queue here, but need to rely on
3473a50a70ecSKonrad Sztyber 			 * the transport to do a zcopy_end to release their buffers and, in turn,
3474a50a70ecSKonrad Sztyber 			 * remove them from the queue.
3475a50a70ecSKonrad Sztyber 			 */
3476a50a70ecSKonrad Sztyber 			req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3477a50a70ecSKonrad Sztyber 			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
3478a50a70ecSKonrad Sztyber 			nvmf_transport_req_free(req);
3479a50a70ecSKonrad Sztyber 		}
3480a50a70ecSKonrad Sztyber 	}
3481a50a70ecSKonrad Sztyber }
3482a50a70ecSKonrad Sztyber 
34837d87273fSBen Walker static void
3484f045d924SShuhei Matsumoto nvmf_qpair_abort_request(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req)
3485f045d924SShuhei Matsumoto {
3486f045d924SShuhei Matsumoto 	uint16_t cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid;
3487f045d924SShuhei Matsumoto 
3488f045d924SShuhei Matsumoto 	if (nvmf_qpair_abort_aer(qpair, cid)) {
34892172c432STomasz Zawadzki 		SPDK_DEBUGLOG(nvmf, "abort ctrlr=%p sqid=%u cid=%u successful\n",
3490f045d924SShuhei Matsumoto 			      qpair->ctrlr, qpair->qid, cid);
3491f045d924SShuhei Matsumoto 		req->rsp->nvme_cpl.cdw0 &= ~1U; /* Command successfully aborted */
3492f045d924SShuhei Matsumoto 
3493f045d924SShuhei Matsumoto 		spdk_nvmf_request_complete(req);
3494f045d924SShuhei Matsumoto 		return;
3495f045d924SShuhei Matsumoto 	}
3496f045d924SShuhei Matsumoto 
3497604b4503SShuhei Matsumoto 	nvmf_transport_qpair_abort_request(qpair, req);
3498f045d924SShuhei Matsumoto }
3499f045d924SShuhei Matsumoto 
3500f045d924SShuhei Matsumoto static void
3501198fd2ceSSeth Howell nvmf_ctrlr_abort_done(struct spdk_io_channel_iter *i, int status)
35027d87273fSBen Walker {
35037d87273fSBen Walker 	struct spdk_nvmf_request *req = spdk_io_channel_iter_get_ctx(i);
35047d87273fSBen Walker 
3505f045d924SShuhei Matsumoto 	if (status == 0) {
3506f045d924SShuhei Matsumoto 		/* There was no qpair whose ID matched the SQID of the Abort command,
3507f045d924SShuhei Matsumoto 		 * so call _nvmf_request_complete() here.
3508f045d924SShuhei Matsumoto 		 */
3509db96437eSShuhei Matsumoto 		_nvmf_request_complete(req);
35107d87273fSBen Walker 	}
3511f045d924SShuhei Matsumoto }
35127d87273fSBen Walker 
35137d87273fSBen Walker static void
3514198fd2ceSSeth Howell nvmf_ctrlr_abort_on_pg(struct spdk_io_channel_iter *i)
35157d87273fSBen Walker {
35167d87273fSBen Walker 	struct spdk_nvmf_request *req = spdk_io_channel_iter_get_ctx(i);
35177d87273fSBen Walker 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
35187d87273fSBen Walker 	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
3519f045d924SShuhei Matsumoto 	uint16_t sqid = req->cmd->nvme_cmd.cdw10_bits.abort.sqid;
35207d87273fSBen Walker 	struct spdk_nvmf_qpair *qpair;
35217d87273fSBen Walker 
35227d87273fSBen Walker 	TAILQ_FOREACH(qpair, &group->qpairs, link) {
35237d87273fSBen Walker 		if (qpair->ctrlr == req->qpair->ctrlr && qpair->qid == sqid) {
35247d87273fSBen Walker 			/* Found the qpair */
35257d87273fSBen Walker 
3526f045d924SShuhei Matsumoto 			nvmf_qpair_abort_request(qpair, req);
35277d87273fSBen Walker 
35287d87273fSBen Walker 			/* Return -1 for the status so the iteration across threads stops. */
35297d87273fSBen Walker 			spdk_for_each_channel_continue(i, -1);
35309975d4a1SShuhei Matsumoto 			return;
35317d87273fSBen Walker 		}
35327d87273fSBen Walker 	}
35337d87273fSBen Walker 
35347d87273fSBen Walker 	spdk_for_each_channel_continue(i, 0);
35357d87273fSBen Walker }
35367d87273fSBen Walker 
353735efde74SDaniel Verkamp static int
3538198fd2ceSSeth Howell nvmf_ctrlr_abort(struct spdk_nvmf_request *req)
353935efde74SDaniel Verkamp {
354035efde74SDaniel Verkamp 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
354135efde74SDaniel Verkamp 
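	/* Bit 0 of Abort's completion CDW0 set to 1 means the command was not
	 * aborted; it is cleared later if a matching command is successfully
	 * aborted (e.g. in nvmf_qpair_abort_request() for AERs).
	 */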
35429975d4a1SShuhei Matsumoto 	rsp->cdw0 = 1U; /* Command not aborted */
35439975d4a1SShuhei Matsumoto 	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
35449975d4a1SShuhei Matsumoto 	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
35457d87273fSBen Walker 
35467d87273fSBen Walker 	/* Send a message to each poll group, searching for this ctrlr, sqid, and command. */
35477d87273fSBen Walker 	spdk_for_each_channel(req->qpair->ctrlr->subsys->tgt,
3548198fd2ceSSeth Howell 			      nvmf_ctrlr_abort_on_pg,
35497d87273fSBen Walker 			      req,
3550198fd2ceSSeth Howell 			      nvmf_ctrlr_abort_done
35517d87273fSBen Walker 			     );
35527d87273fSBen Walker 
35537d87273fSBen Walker 	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
35545b4b66baSDaniel Verkamp }
35555b4b66baSDaniel Verkamp 
3556a8beb79eSShuhei Matsumoto int
355781437ff6SShuhei Matsumoto nvmf_ctrlr_abort_request(struct spdk_nvmf_request *req)
3558a8beb79eSShuhei Matsumoto {
355981437ff6SShuhei Matsumoto 	struct spdk_nvmf_request *req_to_abort = req->req_to_abort;
3560a8beb79eSShuhei Matsumoto 	struct spdk_bdev *bdev;
3561a8beb79eSShuhei Matsumoto 	struct spdk_bdev_desc *desc;
3562a8beb79eSShuhei Matsumoto 	struct spdk_io_channel *ch;
3563a8beb79eSShuhei Matsumoto 	int rc;
3564a8beb79eSShuhei Matsumoto 
356581437ff6SShuhei Matsumoto 	assert(req_to_abort != NULL);
356681437ff6SShuhei Matsumoto 
356762649a7dSMichael Haeuptle 	if (g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_ABORT].hdlr &&
356862649a7dSMichael Haeuptle 	    nvmf_qpair_is_admin_queue(req_to_abort->qpair)) {
356962649a7dSMichael Haeuptle 		return g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_ABORT].hdlr(req);
357062649a7dSMichael Haeuptle 	}
357162649a7dSMichael Haeuptle 
3572a8beb79eSShuhei Matsumoto 	rc = spdk_nvmf_request_get_bdev(req_to_abort->cmd->nvme_cmd.nsid, req_to_abort,
3573a8beb79eSShuhei Matsumoto 					&bdev, &desc, &ch);
3574a8beb79eSShuhei Matsumoto 	if (rc != 0) {
3575a8beb79eSShuhei Matsumoto 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3576a8beb79eSShuhei Matsumoto 	}
3577a8beb79eSShuhei Matsumoto 
357862649a7dSMichael Haeuptle 	return spdk_nvmf_bdev_ctrlr_abort_cmd(bdev, desc, ch, req, req_to_abort);
3579a8beb79eSShuhei Matsumoto }
3580a8beb79eSShuhei Matsumoto 
35815b4b66baSDaniel Verkamp static int
35824fce1a5fSDaniel Verkamp get_features_generic(struct spdk_nvmf_request *req, uint32_t cdw0)
35834fce1a5fSDaniel Verkamp {
35844fce1a5fSDaniel Verkamp 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
35854fce1a5fSDaniel Verkamp 
35864fce1a5fSDaniel Verkamp 	rsp->cdw0 = cdw0;
35874fce1a5fSDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
35884fce1a5fSDaniel Verkamp }
35894fce1a5fSDaniel Verkamp 
359068f16817SShuhei Matsumoto /* we have to use the typedef in the function declaration to appease astyle. */
359168f16817SShuhei Matsumoto typedef enum spdk_nvme_path_status_code spdk_nvme_path_status_code_t;
359268f16817SShuhei Matsumoto 
359368f16817SShuhei Matsumoto static spdk_nvme_path_status_code_t
359468f16817SShuhei Matsumoto _nvme_ana_state_to_path_status(enum spdk_nvme_ana_state ana_state)
359568f16817SShuhei Matsumoto {
359668f16817SShuhei Matsumoto 	switch (ana_state) {
359768f16817SShuhei Matsumoto 	case SPDK_NVME_ANA_INACCESSIBLE_STATE:
359868f16817SShuhei Matsumoto 		return SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
359968f16817SShuhei Matsumoto 	case SPDK_NVME_ANA_PERSISTENT_LOSS_STATE:
360068f16817SShuhei Matsumoto 		return SPDK_NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS;
360168f16817SShuhei Matsumoto 	case SPDK_NVME_ANA_CHANGE_STATE:
360268f16817SShuhei Matsumoto 		return SPDK_NVME_SC_ASYMMETRIC_ACCESS_TRANSITION;
360368f16817SShuhei Matsumoto 	default:
360468f16817SShuhei Matsumoto 		return SPDK_NVME_SC_INTERNAL_PATH_ERROR;
360568f16817SShuhei Matsumoto 	}
360668f16817SShuhei Matsumoto }
360768f16817SShuhei Matsumoto 
36084fce1a5fSDaniel Verkamp static int
3609198fd2ceSSeth Howell nvmf_ctrlr_get_features(struct spdk_nvmf_request *req)
36105b4b66baSDaniel Verkamp {
36115b4b66baSDaniel Verkamp 	uint8_t feature;
36124fce1a5fSDaniel Verkamp 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
36135b4b66baSDaniel Verkamp 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
36145b4b66baSDaniel Verkamp 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
361568f16817SShuhei Matsumoto 	enum spdk_nvme_ana_state ana_state;
36165b4b66baSDaniel Verkamp 
36171fea1fccSChangpeng Liu 	feature = cmd->cdw10_bits.get_features.fid;
361868f16817SShuhei Matsumoto 
36197efdf905SSlawomir Ptak 	if (spdk_nvmf_subsystem_is_discovery(ctrlr->subsys)) {
3620fce94287SMadhu Adav MJ 		/*
3621fce94287SMadhu Adav MJ 		 * Features supported by Discovery controller
3622fce94287SMadhu Adav MJ 		 */
3623fce94287SMadhu Adav MJ 		switch (feature) {
3624fce94287SMadhu Adav MJ 		case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
3625fce94287SMadhu Adav MJ 			return get_features_generic(req, ctrlr->feat.keep_alive_timer.raw);
3626fce94287SMadhu Adav MJ 		case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
3627fce94287SMadhu Adav MJ 			return get_features_generic(req, ctrlr->feat.async_event_configuration.raw);
3628fce94287SMadhu Adav MJ 		default:
36297d136a20SChangpeng Liu 			SPDK_INFOLOG(nvmf, "Get Features command with unsupported feature ID 0x%02x\n", feature);
3630fce94287SMadhu Adav MJ 			response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
3631fce94287SMadhu Adav MJ 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3632fce94287SMadhu Adav MJ 		}
3633fce94287SMadhu Adav MJ 	}
3634fce94287SMadhu Adav MJ 	/*
3635fce94287SMadhu Adav MJ 	 * Process Get Features command for non-discovery controller
3636fce94287SMadhu Adav MJ 	 */
36371c81d1afSKonrad Sztyber 	ana_state = nvmf_ctrlr_get_ana_state_from_nsid(ctrlr, cmd->nsid);
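	/* Namespace-scoped features are failed with a path status code below when
	 * the namespace is in an inaccessible, persistent loss or change ANA state.
	 */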
363868f16817SShuhei Matsumoto 	switch (ana_state) {
363968f16817SShuhei Matsumoto 	case SPDK_NVME_ANA_INACCESSIBLE_STATE:
364068f16817SShuhei Matsumoto 	case SPDK_NVME_ANA_PERSISTENT_LOSS_STATE:
364168f16817SShuhei Matsumoto 	case SPDK_NVME_ANA_CHANGE_STATE:
364268f16817SShuhei Matsumoto 		switch (feature) {
364368f16817SShuhei Matsumoto 		case SPDK_NVME_FEAT_ERROR_RECOVERY:
364468f16817SShuhei Matsumoto 		case SPDK_NVME_FEAT_WRITE_ATOMICITY:
364568f16817SShuhei Matsumoto 		case SPDK_NVME_FEAT_HOST_RESERVE_MASK:
364668f16817SShuhei Matsumoto 		case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST:
364768f16817SShuhei Matsumoto 			response->status.sct = SPDK_NVME_SCT_PATH;
364868f16817SShuhei Matsumoto 			response->status.sc = _nvme_ana_state_to_path_status(ana_state);
364968f16817SShuhei Matsumoto 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
365068f16817SShuhei Matsumoto 		default:
365168f16817SShuhei Matsumoto 			break;
365268f16817SShuhei Matsumoto 		}
365368f16817SShuhei Matsumoto 		break;
365468f16817SShuhei Matsumoto 	default:
365568f16817SShuhei Matsumoto 		break;
365668f16817SShuhei Matsumoto 	}
365768f16817SShuhei Matsumoto 
36585b4b66baSDaniel Verkamp 	switch (feature) {
3659d2e7daa4SDaniel Verkamp 	case SPDK_NVME_FEAT_ARBITRATION:
3660d2e7daa4SDaniel Verkamp 		return get_features_generic(req, ctrlr->feat.arbitration.raw);
3661d2e7daa4SDaniel Verkamp 	case SPDK_NVME_FEAT_POWER_MANAGEMENT:
3662d2e7daa4SDaniel Verkamp 		return get_features_generic(req, ctrlr->feat.power_management.raw);
3663d2e7daa4SDaniel Verkamp 	case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD:
3664198fd2ceSSeth Howell 		return nvmf_ctrlr_get_features_temperature_threshold(req);
3665d2e7daa4SDaniel Verkamp 	case SPDK_NVME_FEAT_ERROR_RECOVERY:
3666d2e7daa4SDaniel Verkamp 		return get_features_generic(req, ctrlr->feat.error_recovery.raw);
36675b4b66baSDaniel Verkamp 	case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE:
36684fce1a5fSDaniel Verkamp 		return get_features_generic(req, ctrlr->feat.volatile_write_cache.raw);
3669d2e7daa4SDaniel Verkamp 	case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
3670d2e7daa4SDaniel Verkamp 		return get_features_generic(req, ctrlr->feat.number_of_queues.raw);
36713eed8456SChangpeng Liu 	case SPDK_NVME_FEAT_INTERRUPT_COALESCING:
36723eed8456SChangpeng Liu 		return get_features_generic(req, ctrlr->feat.interrupt_coalescing.raw);
36733eed8456SChangpeng Liu 	case SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
36743eed8456SChangpeng Liu 		return nvmf_ctrlr_get_features_interrupt_vector_configuration(req);
3675d2e7daa4SDaniel Verkamp 	case SPDK_NVME_FEAT_WRITE_ATOMICITY:
3676d2e7daa4SDaniel Verkamp 		return get_features_generic(req, ctrlr->feat.write_atomicity.raw);
36775b4b66baSDaniel Verkamp 	case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
36784fce1a5fSDaniel Verkamp 		return get_features_generic(req, ctrlr->feat.async_event_configuration.raw);
3679d2e7daa4SDaniel Verkamp 	case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
3680d2e7daa4SDaniel Verkamp 		return get_features_generic(req, ctrlr->feat.keep_alive_timer.raw);
36815b4b66baSDaniel Verkamp 	case SPDK_NVME_FEAT_HOST_IDENTIFIER:
3682198fd2ceSSeth Howell 		return nvmf_ctrlr_get_features_host_identifier(req);
3683d5b89466SChangpeng Liu 	case SPDK_NVME_FEAT_HOST_RESERVE_MASK:
3684198fd2ceSSeth Howell 		return nvmf_ctrlr_get_features_reservation_notification_mask(req);
3685da30cda9SChangpeng Liu 	case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST:
3686198fd2ceSSeth Howell 		return nvmf_ctrlr_get_features_reservation_persistence(req);
36878a1862c0SChangpeng Liu 	case SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT:
36888a1862c0SChangpeng Liu 		return nvmf_ctrlr_get_features_host_behavior_support(req);
36895b4b66baSDaniel Verkamp 	default:
36907d136a20SChangpeng Liu 		SPDK_INFOLOG(nvmf, "Get Features command with unsupported feature ID 0x%02x\n", feature);
36915b4b66baSDaniel Verkamp 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
36925b4b66baSDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
36935b4b66baSDaniel Verkamp 	}
36945b4b66baSDaniel Verkamp }
36955b4b66baSDaniel Verkamp 
36965b4b66baSDaniel Verkamp static int
3697198fd2ceSSeth Howell nvmf_ctrlr_set_features(struct spdk_nvmf_request *req)
36985b4b66baSDaniel Verkamp {
36992eda8390SJacek Kalwas 	uint8_t feature, save;
370068f16817SShuhei Matsumoto 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
37015b4b66baSDaniel Verkamp 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
37025b4b66baSDaniel Verkamp 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
370368f16817SShuhei Matsumoto 	enum spdk_nvme_ana_state ana_state;
37042eda8390SJacek Kalwas 	/*
37052eda8390SJacek Kalwas 	 * Features are not saveable by this controller, as indicated by the
37062eda8390SJacek Kalwas 	 * ONCS field of the Identify Controller data.
37072eda8390SJacek Kalwas 	 */
37082eda8390SJacek Kalwas 	save = cmd->cdw10_bits.set_features.sv;
37092eda8390SJacek Kalwas 	if (save) {
37102eda8390SJacek Kalwas 		response->status.sc = SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE;
37112eda8390SJacek Kalwas 		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
37122eda8390SJacek Kalwas 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
37132eda8390SJacek Kalwas 	}
37142eda8390SJacek Kalwas 
37151fea1fccSChangpeng Liu 	feature = cmd->cdw10_bits.set_features.fid;
371668f16817SShuhei Matsumoto 
37177efdf905SSlawomir Ptak 	if (spdk_nvmf_subsystem_is_discovery(ctrlr->subsys)) {
3718fce94287SMadhu Adav MJ 		/*
3719fce94287SMadhu Adav MJ 		 * Features supported by Discovery controller
3720fce94287SMadhu Adav MJ 		 */
3721fce94287SMadhu Adav MJ 		switch (feature) {
3722fce94287SMadhu Adav MJ 		case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
3723fce94287SMadhu Adav MJ 			return nvmf_ctrlr_set_features_keep_alive_timer(req);
3724fce94287SMadhu Adav MJ 		case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
3725fce94287SMadhu Adav MJ 			return nvmf_ctrlr_set_features_async_event_configuration(req);
3726fce94287SMadhu Adav MJ 		default:
37277d136a20SChangpeng Liu 			SPDK_INFOLOG(nvmf, "Set Features command with unsupported feature ID 0x%02x\n", feature);
3728fce94287SMadhu Adav MJ 			response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
3729fce94287SMadhu Adav MJ 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3730fce94287SMadhu Adav MJ 		}
3731fce94287SMadhu Adav MJ 	}
3732fce94287SMadhu Adav MJ 	/*
3733fce94287SMadhu Adav MJ 	 * Process Set Features command for non-discovery controller
3734fce94287SMadhu Adav MJ 	 */
37351c81d1afSKonrad Sztyber 	ana_state = nvmf_ctrlr_get_ana_state_from_nsid(ctrlr, cmd->nsid);
373668f16817SShuhei Matsumoto 	switch (ana_state) {
373768f16817SShuhei Matsumoto 	case SPDK_NVME_ANA_INACCESSIBLE_STATE:
373868f16817SShuhei Matsumoto 	case SPDK_NVME_ANA_CHANGE_STATE:
373998145aa6SShuhei Matsumoto 		if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) {
374068f16817SShuhei Matsumoto 			response->status.sct = SPDK_NVME_SCT_PATH;
374168f16817SShuhei Matsumoto 			response->status.sc = _nvme_ana_state_to_path_status(ana_state);
374268f16817SShuhei Matsumoto 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
374368f16817SShuhei Matsumoto 		} else {
374468f16817SShuhei Matsumoto 			switch (feature) {
374568f16817SShuhei Matsumoto 			case SPDK_NVME_FEAT_ERROR_RECOVERY:
374668f16817SShuhei Matsumoto 			case SPDK_NVME_FEAT_WRITE_ATOMICITY:
374768f16817SShuhei Matsumoto 			case SPDK_NVME_FEAT_HOST_RESERVE_MASK:
374868f16817SShuhei Matsumoto 			case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST:
374968f16817SShuhei Matsumoto 				response->status.sct = SPDK_NVME_SCT_PATH;
375068f16817SShuhei Matsumoto 				response->status.sc = _nvme_ana_state_to_path_status(ana_state);
375168f16817SShuhei Matsumoto 				return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
375268f16817SShuhei Matsumoto 			default:
375368f16817SShuhei Matsumoto 				break;
375468f16817SShuhei Matsumoto 			}
375568f16817SShuhei Matsumoto 		}
375668f16817SShuhei Matsumoto 		break;
375768f16817SShuhei Matsumoto 	case SPDK_NVME_ANA_PERSISTENT_LOSS_STATE:
375868f16817SShuhei Matsumoto 		response->status.sct = SPDK_NVME_SCT_PATH;
375968f16817SShuhei Matsumoto 		response->status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS;
376068f16817SShuhei Matsumoto 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
376168f16817SShuhei Matsumoto 	default:
376268f16817SShuhei Matsumoto 		break;
376368f16817SShuhei Matsumoto 	}
376468f16817SShuhei Matsumoto 
37655b4b66baSDaniel Verkamp 	switch (feature) {
3766d2e7daa4SDaniel Verkamp 	case SPDK_NVME_FEAT_ARBITRATION:
3767198fd2ceSSeth Howell 		return nvmf_ctrlr_set_features_arbitration(req);
3768d2e7daa4SDaniel Verkamp 	case SPDK_NVME_FEAT_POWER_MANAGEMENT:
3769198fd2ceSSeth Howell 		return nvmf_ctrlr_set_features_power_management(req);
3770d2e7daa4SDaniel Verkamp 	case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD:
3771198fd2ceSSeth Howell 		return nvmf_ctrlr_set_features_temperature_threshold(req);
3772d2e7daa4SDaniel Verkamp 	case SPDK_NVME_FEAT_ERROR_RECOVERY:
3773198fd2ceSSeth Howell 		return nvmf_ctrlr_set_features_error_recovery(req);
3774d2e7daa4SDaniel Verkamp 	case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE:
3775198fd2ceSSeth Howell 		return nvmf_ctrlr_set_features_volatile_write_cache(req);
37765b4b66baSDaniel Verkamp 	case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
3777198fd2ceSSeth Howell 		return nvmf_ctrlr_set_features_number_of_queues(req);
377809c0c976SChangpeng Liu 	case SPDK_NVME_FEAT_INTERRUPT_COALESCING:
377909c0c976SChangpeng Liu 		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
378009c0c976SChangpeng Liu 		response->status.sc = SPDK_NVME_SC_FEATURE_NOT_CHANGEABLE;
378109c0c976SChangpeng Liu 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3782d2e7daa4SDaniel Verkamp 	case SPDK_NVME_FEAT_WRITE_ATOMICITY:
3783198fd2ceSSeth Howell 		return nvmf_ctrlr_set_features_write_atomicity(req);
37845b4b66baSDaniel Verkamp 	case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
3785198fd2ceSSeth Howell 		return nvmf_ctrlr_set_features_async_event_configuration(req);
3786d2e7daa4SDaniel Verkamp 	case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
3787198fd2ceSSeth Howell 		return nvmf_ctrlr_set_features_keep_alive_timer(req);
37885b4b66baSDaniel Verkamp 	case SPDK_NVME_FEAT_HOST_IDENTIFIER:
3789198fd2ceSSeth Howell 		return nvmf_ctrlr_set_features_host_identifier(req);
3790d5b89466SChangpeng Liu 	case SPDK_NVME_FEAT_HOST_RESERVE_MASK:
3791198fd2ceSSeth Howell 		return nvmf_ctrlr_set_features_reservation_notification_mask(req);
3792da30cda9SChangpeng Liu 	case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST:
3793198fd2ceSSeth Howell 		return nvmf_ctrlr_set_features_reservation_persistence(req);
37945cc56599Syupeng 	case SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT:
37955cc56599Syupeng 		return nvmf_ctrlr_set_features_host_behavior_support(req);
37965b4b66baSDaniel Verkamp 	default:
3797814cd258SChangpeng Liu 		SPDK_INFOLOG(nvmf, "Set Features command with unsupported feature ID 0x%02x\n", feature);
37985b4b66baSDaniel Verkamp 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
37995b4b66baSDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
38005b4b66baSDaniel Verkamp 	}
38015b4b66baSDaniel Verkamp }
38025b4b66baSDaniel Verkamp 
38035b4b66baSDaniel Verkamp static int
3804198fd2ceSSeth Howell nvmf_ctrlr_keep_alive(struct spdk_nvmf_request *req)
38055b4b66baSDaniel Verkamp {
3806b8769cdbSJinYu 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
3807b8769cdbSJinYu 
38082172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "Keep Alive\n");
38095b4b66baSDaniel Verkamp 	/*
38105b4b66baSDaniel Verkamp 	 * To handle keep alive just clear or reset the
38115b4b66baSDaniel Verkamp 	 * ctrlr based keep alive duration counter.
38125b4b66baSDaniel Verkamp 	 * When added, a separate timer based process
38135b4b66baSDaniel Verkamp 	 * will monitor if the time since last recorded
38145b4b66baSDaniel Verkamp 	 * keep alive has exceeded the max duration and
38155b4b66baSDaniel Verkamp 	 * take appropriate action.
38165b4b66baSDaniel Verkamp 	 */
3817b8769cdbSJinYu 	ctrlr->last_keep_alive_tick = spdk_get_ticks();
3818b8769cdbSJinYu 
38195b4b66baSDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
38205b4b66baSDaniel Verkamp }
38215b4b66baSDaniel Verkamp 
38228bbc7b69SPawel Baldysiak static bool
38238bbc7b69SPawel Baldysiak is_cmd_ctrlr_specific(struct spdk_nvme_cmd *cmd)
38248bbc7b69SPawel Baldysiak {
38258bbc7b69SPawel Baldysiak 	switch (cmd->opc) {
38268bbc7b69SPawel Baldysiak 	case SPDK_NVME_OPC_DELETE_IO_SQ:
38278bbc7b69SPawel Baldysiak 	case SPDK_NVME_OPC_CREATE_IO_SQ:
38288bbc7b69SPawel Baldysiak 	case SPDK_NVME_OPC_DELETE_IO_CQ:
38298bbc7b69SPawel Baldysiak 	case SPDK_NVME_OPC_CREATE_IO_CQ:
38308bbc7b69SPawel Baldysiak 	case SPDK_NVME_OPC_ABORT:
38318bbc7b69SPawel Baldysiak 	case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST:
38328bbc7b69SPawel Baldysiak 	case SPDK_NVME_OPC_FIRMWARE_COMMIT:
38338bbc7b69SPawel Baldysiak 	case SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD:
38348bbc7b69SPawel Baldysiak 	case SPDK_NVME_OPC_KEEP_ALIVE:
38358bbc7b69SPawel Baldysiak 	case SPDK_NVME_OPC_VIRTUALIZATION_MANAGEMENT:
38368bbc7b69SPawel Baldysiak 	case SPDK_NVME_OPC_NVME_MI_SEND:
38378bbc7b69SPawel Baldysiak 	case SPDK_NVME_OPC_NVME_MI_RECEIVE:
38388bbc7b69SPawel Baldysiak 	case SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG:
38398bbc7b69SPawel Baldysiak 	case SPDK_NVME_OPC_SANITIZE:
38408bbc7b69SPawel Baldysiak 		return true;
38418bbc7b69SPawel Baldysiak 	default:
38428bbc7b69SPawel Baldysiak 		return false;
38438bbc7b69SPawel Baldysiak 	}
38448bbc7b69SPawel Baldysiak }
38458bbc7b69SPawel Baldysiak 
38465b4b66baSDaniel Verkamp int
38479cb21ad6SSeth Howell nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
38485b4b66baSDaniel Verkamp {
3849823b565bSBen Walker 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
38505b4b66baSDaniel Verkamp 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
38515b4b66baSDaniel Verkamp 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
38529cb5f885SJim Harris 	struct spdk_nvmf_subsystem_poll_group *sgroup;
38533fa22056SMichael Haeuptle 	int rc;
38545b4b66baSDaniel Verkamp 
385533e23361SKonrad Sztyber 	assert(ctrlr != NULL);
38569cb5f885SJim Harris 	if (cmd->opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
38579cb5f885SJim Harris 		/* We do not want to treat AERs as outstanding commands,
38589cb5f885SJim Harris 		 * so decrement mgmt_io_outstanding here to offset
38599cb5f885SJim Harris 		 * the increment that happened prior to this call.
38609cb5f885SJim Harris 		 */
38619cb5f885SJim Harris 		sgroup = &req->qpair->group->sgroups[ctrlr->subsys->id];
38629cb5f885SJim Harris 		assert(sgroup != NULL);
38639cb5f885SJim Harris 		sgroup->mgmt_io_outstanding--;
38649cb5f885SJim Harris 	}
38659cb5f885SJim Harris 
3866fcc426bdSJacek Kalwas 	assert(spdk_get_thread() == ctrlr->thread);
3867fcc426bdSJacek Kalwas 
38688bbc7b69SPawel Baldysiak 	if (cmd->fuse != 0 ||
38698bbc7b69SPawel Baldysiak 	    (is_cmd_ctrlr_specific(cmd) && (cmd->nsid != 0))) {
38708bbc7b69SPawel Baldysiak 		/* Fused admin commands are not supported.
38718bbc7b69SPawel Baldysiak 		 * Controller-scoped commands must be rejected if an NSID is set.
38728bbc7b69SPawel Baldysiak 		 */
38733ca8b72cSJim Harris 		response->status.sct = SPDK_NVME_SCT_GENERIC;
38743ca8b72cSJim Harris 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
38753ca8b72cSJim Harris 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
38763ca8b72cSJim Harris 	}
38773ca8b72cSJim Harris 
3878823b565bSBen Walker 	if (ctrlr->vcprop.cc.bits.en != 1) {
3879823b565bSBen Walker 		SPDK_ERRLOG("Admin command sent to disabled controller\n");
3880823b565bSBen Walker 		response->status.sct = SPDK_NVME_SCT_GENERIC;
3881823b565bSBen Walker 		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
3882823b565bSBen Walker 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3883823b565bSBen Walker 	}
3884823b565bSBen Walker 
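	/* Controller-to-host data buffers are zeroed up front so that any bytes a
	 * handler does not explicitly fill are returned to the host as zeroes.
	 */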
388549c0d28aSJohn Levon 	if (req->iovcnt && spdk_nvme_opc_get_data_transfer(cmd->opc) == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
388647568c65SJohn Levon 		spdk_iov_memset(req->iov, req->iovcnt, 0);
3887afb3f738SDaniel Verkamp 	}
3888afb3f738SDaniel Verkamp 
38897efdf905SSlawomir Ptak 	if (spdk_nvmf_subsystem_is_discovery(ctrlr->subsys)) {
3890fce94287SMadhu Adav MJ 		/* Discovery controllers only support these admin OPS. */
38915b4b66baSDaniel Verkamp 		switch (cmd->opc) {
38925b4b66baSDaniel Verkamp 		case SPDK_NVME_OPC_IDENTIFY:
38935b4b66baSDaniel Verkamp 		case SPDK_NVME_OPC_GET_LOG_PAGE:
3894cb8621ebSJin Yu 		case SPDK_NVME_OPC_KEEP_ALIVE:
3895fce94287SMadhu Adav MJ 		case SPDK_NVME_OPC_SET_FEATURES:
3896fce94287SMadhu Adav MJ 		case SPDK_NVME_OPC_GET_FEATURES:
3897fce94287SMadhu Adav MJ 		case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST:
38985b4b66baSDaniel Verkamp 			break;
38995b4b66baSDaniel Verkamp 		default:
39005b4b66baSDaniel Verkamp 			goto invalid_opcode;
39015b4b66baSDaniel Verkamp 		}
39025b4b66baSDaniel Verkamp 	}
39035b4b66baSDaniel Verkamp 
390462649a7dSMichael Haeuptle 	/* Call a custom adm cmd handler if set. Aborts are handled in a different path (see nvmf_passthru_admin_cmd) */
390562649a7dSMichael Haeuptle 	if (g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].hdlr && cmd->opc != SPDK_NVME_OPC_ABORT) {
39063fa22056SMichael Haeuptle 		rc = g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].hdlr(req);
39073fa22056SMichael Haeuptle 		if (rc >= SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
3908cc6920a4SJosh Soref 			/* The handler took care of this command */
39093fa22056SMichael Haeuptle 			return rc;
39103fa22056SMichael Haeuptle 		}
39113fa22056SMichael Haeuptle 	}
39123fa22056SMichael Haeuptle 
391360241941SKarl Bonde Torp 	/* We only want to send passthrough admin commands to namespaces.
391460241941SKarl Bonde Torp 	 * However, we don't want to pass through a command intended for all namespaces.
391560241941SKarl Bonde Torp 	 */
391660241941SKarl Bonde Torp 	if (ctrlr->subsys->passthrough && cmd->nsid && cmd->nsid != SPDK_NVME_GLOBAL_NS_TAG) {
391760241941SKarl Bonde Torp 		return nvmf_passthru_admin_cmd(req);
391860241941SKarl Bonde Torp 	}
391960241941SKarl Bonde Torp 
39205b4b66baSDaniel Verkamp 	switch (cmd->opc) {
39215b4b66baSDaniel Verkamp 	case SPDK_NVME_OPC_GET_LOG_PAGE:
3922198fd2ceSSeth Howell 		return nvmf_ctrlr_get_log_page(req);
39235b4b66baSDaniel Verkamp 	case SPDK_NVME_OPC_IDENTIFY:
3924198fd2ceSSeth Howell 		return nvmf_ctrlr_identify(req);
39255b4b66baSDaniel Verkamp 	case SPDK_NVME_OPC_ABORT:
3926198fd2ceSSeth Howell 		return nvmf_ctrlr_abort(req);
39275b4b66baSDaniel Verkamp 	case SPDK_NVME_OPC_GET_FEATURES:
3928198fd2ceSSeth Howell 		return nvmf_ctrlr_get_features(req);
39295b4b66baSDaniel Verkamp 	case SPDK_NVME_OPC_SET_FEATURES:
3930198fd2ceSSeth Howell 		return nvmf_ctrlr_set_features(req);
39315b4b66baSDaniel Verkamp 	case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST:
3932198fd2ceSSeth Howell 		return nvmf_ctrlr_async_event_request(req);
39335b4b66baSDaniel Verkamp 	case SPDK_NVME_OPC_KEEP_ALIVE:
3934198fd2ceSSeth Howell 		return nvmf_ctrlr_keep_alive(req);
39355b4b66baSDaniel Verkamp 
39365b4b66baSDaniel Verkamp 	case SPDK_NVME_OPC_CREATE_IO_SQ:
39375b4b66baSDaniel Verkamp 	case SPDK_NVME_OPC_CREATE_IO_CQ:
39385b4b66baSDaniel Verkamp 	case SPDK_NVME_OPC_DELETE_IO_SQ:
39395b4b66baSDaniel Verkamp 	case SPDK_NVME_OPC_DELETE_IO_CQ:
39405b4b66baSDaniel Verkamp 		/* Create and Delete I/O CQ/SQ not allowed in NVMe-oF */
39415b4b66baSDaniel Verkamp 		goto invalid_opcode;
39425b4b66baSDaniel Verkamp 
39435b4b66baSDaniel Verkamp 	default:
39445b4b66baSDaniel Verkamp 		goto invalid_opcode;
39455b4b66baSDaniel Verkamp 	}
39465b4b66baSDaniel Verkamp 
39475b4b66baSDaniel Verkamp invalid_opcode:
39487d136a20SChangpeng Liu 	SPDK_INFOLOG(nvmf, "Unsupported admin opcode 0x%x\n", cmd->opc);
39495b4b66baSDaniel Verkamp 	response->status.sct = SPDK_NVME_SCT_GENERIC;
39505b4b66baSDaniel Verkamp 	response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
39515b4b66baSDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
39525b4b66baSDaniel Verkamp }
39535323a026SDaniel Verkamp 
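/*
 * Dispatch a fabrics command. Connect is the only command accepted before a
 * controller is associated with the queue pair. Afterwards, admin queues accept
 * Property Get/Set and authentication commands, while I/O queues accept only
 * authentication commands.
 */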
3954778e4c00SChangpeng Liu static int
39559cb21ad6SSeth Howell nvmf_ctrlr_process_fabrics_cmd(struct spdk_nvmf_request *req)
39565323a026SDaniel Verkamp {
39575323a026SDaniel Verkamp 	struct spdk_nvmf_qpair *qpair = req->qpair;
39585323a026SDaniel Verkamp 	struct spdk_nvmf_capsule_cmd *cap_hdr;
39595323a026SDaniel Verkamp 
39605323a026SDaniel Verkamp 	cap_hdr = &req->cmd->nvmf_cmd;
39615323a026SDaniel Verkamp 
39625323a026SDaniel Verkamp 	if (qpair->ctrlr == NULL) {
39635323a026SDaniel Verkamp 		/* No ctrlr established yet; the only valid command is Connect */
396433e23361SKonrad Sztyber 		assert(cap_hdr->fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT);
3965198fd2ceSSeth Howell 		return nvmf_ctrlr_cmd_connect(req);
39669cb21ad6SSeth Howell 	} else if (nvmf_qpair_is_admin_queue(qpair)) {
39675323a026SDaniel Verkamp 		/*
39685323a026SDaniel Verkamp 		 * Controller session is established, and this is an admin queue.
39695323a026SDaniel Verkamp 		 * Disallow Connect and allow other fabrics commands.
39705323a026SDaniel Verkamp 		 */
39715323a026SDaniel Verkamp 		switch (cap_hdr->fctype) {
39725323a026SDaniel Verkamp 		case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET:
3973198fd2ceSSeth Howell 			return nvmf_property_set(req);
39745323a026SDaniel Verkamp 		case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET:
3975198fd2ceSSeth Howell 			return nvmf_property_get(req);
39760a6bb8caSKonrad Sztyber 		case SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND:
39770a6bb8caSKonrad Sztyber 		case SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV:
39780a6bb8caSKonrad Sztyber 			return nvmf_auth_request_exec(req);
39795323a026SDaniel Verkamp 		default:
39802172c432STomasz Zawadzki 			SPDK_DEBUGLOG(nvmf, "unknown fctype 0x%02x\n",
39815323a026SDaniel Verkamp 				      cap_hdr->fctype);
39825323a026SDaniel Verkamp 			req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
39835323a026SDaniel Verkamp 			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
39845323a026SDaniel Verkamp 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
39855323a026SDaniel Verkamp 		}
39865323a026SDaniel Verkamp 	} else {
39870a6bb8caSKonrad Sztyber 		/*
39880a6bb8caSKonrad Sztyber 		 * Controller session is established, and this is an I/O queue.
39890a6bb8caSKonrad Sztyber 		 * Disallow everything besides authentication commands.
39900a6bb8caSKonrad Sztyber 		 */
39910a6bb8caSKonrad Sztyber 		switch (cap_hdr->fctype) {
39920a6bb8caSKonrad Sztyber 		case SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND:
39930a6bb8caSKonrad Sztyber 		case SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV:
39940a6bb8caSKonrad Sztyber 			return nvmf_auth_request_exec(req);
39950a6bb8caSKonrad Sztyber 		default:
39962172c432STomasz Zawadzki 			SPDK_DEBUGLOG(nvmf, "Unexpected I/O fctype 0x%x\n", cap_hdr->fctype);
39975323a026SDaniel Verkamp 			req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
39985323a026SDaniel Verkamp 			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
39995323a026SDaniel Verkamp 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
40005323a026SDaniel Verkamp 		}
40015323a026SDaniel Verkamp 	}
40020a6bb8caSKonrad Sztyber }
4003763ab888SChangpeng Liu 
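/*
 * Queue an async event for later delivery when no AER request is outstanding.
 * The event is copied into a newly allocated entry on the controller's pending
 * list; if allocation fails, the event is dropped.
 */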
40048cd9ef28SJiewei Ke static inline void
40058cd9ef28SJiewei Ke nvmf_ctrlr_queue_pending_async_event(struct spdk_nvmf_ctrlr *ctrlr,
40068cd9ef28SJiewei Ke 				     union spdk_nvme_async_event_completion *event)
40078cd9ef28SJiewei Ke {
40088cd9ef28SJiewei Ke 	struct spdk_nvmf_async_event_completion *nvmf_event;
40098cd9ef28SJiewei Ke 
40108cd9ef28SJiewei Ke 	nvmf_event = calloc(1, sizeof(*nvmf_event));
40118cd9ef28SJiewei Ke 	if (!nvmf_event) {
40128cd9ef28SJiewei Ke 		SPDK_ERRLOG("Alloc nvmf event failed, ignore the event\n");
40138cd9ef28SJiewei Ke 		return;
40148cd9ef28SJiewei Ke 	}
40158cd9ef28SJiewei Ke 	nvmf_event->event.raw = event->raw;
40168cd9ef28SJiewei Ke 	STAILQ_INSERT_TAIL(&ctrlr->async_events, nvmf_event, link);
40178cd9ef28SJiewei Ke }
40188cd9ef28SJiewei Ke 
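/*
 * Deliver an async event to the host: if an AER request is outstanding, complete
 * it with the event in CDW0; otherwise queue the event until the next AER arrives.
 */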
4019f7866a56SChangpeng Liu static inline int
4020f7866a56SChangpeng Liu nvmf_ctrlr_async_event_notification(struct spdk_nvmf_ctrlr *ctrlr,
4021f7866a56SChangpeng Liu 				    union spdk_nvme_async_event_completion *event)
4022f7866a56SChangpeng Liu {
4023f7866a56SChangpeng Liu 	struct spdk_nvmf_request *req;
4024f7866a56SChangpeng Liu 	struct spdk_nvme_cpl *rsp;
4025f7866a56SChangpeng Liu 
4026fcc426bdSJacek Kalwas 	assert(spdk_get_thread() == ctrlr->thread);
4027fcc426bdSJacek Kalwas 
4028f7866a56SChangpeng Liu 	/* If there is no outstanding AER request, queue the event.  Then
4029f7866a56SChangpeng Liu 	 * if an AER is later submitted, this event can be sent as a
4030f7866a56SChangpeng Liu 	 * response.
4031f7866a56SChangpeng Liu 	 */
4032f7866a56SChangpeng Liu 	if (ctrlr->nr_aer_reqs == 0) {
4033f7866a56SChangpeng Liu 		nvmf_ctrlr_queue_pending_async_event(ctrlr, event);
4034f7866a56SChangpeng Liu 		return 0;
4035f7866a56SChangpeng Liu 	}
4036f7866a56SChangpeng Liu 
4037f7866a56SChangpeng Liu 	req = ctrlr->aer_req[--ctrlr->nr_aer_reqs];
4038f7866a56SChangpeng Liu 	rsp = &req->rsp->nvme_cpl;
4039f7866a56SChangpeng Liu 
4040f7866a56SChangpeng Liu 	rsp->cdw0 = event->raw;
4041f7866a56SChangpeng Liu 
4042f7866a56SChangpeng Liu 	_nvmf_request_complete(req);
4043f7866a56SChangpeng Liu 	ctrlr->aer_req[ctrlr->nr_aer_reqs] = NULL;
4044f7866a56SChangpeng Liu 
4045f7866a56SChangpeng Liu 	return 0;
4046f7866a56SChangpeng Liu }
4047f7866a56SChangpeng Liu 
404813cc64fdSJin Yu int
404913cc64fdSJin Yu nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr)
405013cc64fdSJin Yu {
4051763ab888SChangpeng Liu 	union spdk_nvme_async_event_completion event = {0};
4052763ab888SChangpeng Liu 
4053763ab888SChangpeng Liu 	/* Users may disable the event notification */
4054763ab888SChangpeng Liu 	if (!ctrlr->feat.async_event_configuration.bits.ns_attr_notice) {
4055763ab888SChangpeng Liu 		return 0;
4056763ab888SChangpeng Liu 	}
4057763ab888SChangpeng Liu 
4058a9bdb1eeSJiewei Ke 	if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGE_MASK_BIT)) {
4059a9bdb1eeSJiewei Ke 		return 0;
4060a9bdb1eeSJiewei Ke 	}
4061a9bdb1eeSJiewei Ke 
4062763ab888SChangpeng Liu 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
4063763ab888SChangpeng Liu 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
406426541489SDaniel Verkamp 	event.bits.log_page_identifier = SPDK_NVME_LOG_CHANGED_NS_LIST;
4065763ab888SChangpeng Liu 
4066db96437eSShuhei Matsumoto 	return nvmf_ctrlr_async_event_notification(ctrlr, &event);
4067763ab888SChangpeng Liu }
40684bfb557dSPhilipp Skadorov 
406958da6223SShuhei Matsumoto int
407058da6223SShuhei Matsumoto nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr)
407158da6223SShuhei Matsumoto {
407258da6223SShuhei Matsumoto 	union spdk_nvme_async_event_completion event = {0};
407358da6223SShuhei Matsumoto 
407458da6223SShuhei Matsumoto 	/* Users may disable the event notification */
407558da6223SShuhei Matsumoto 	if (!ctrlr->feat.async_event_configuration.bits.ana_change_notice) {
407658da6223SShuhei Matsumoto 		return 0;
407758da6223SShuhei Matsumoto 	}
407858da6223SShuhei Matsumoto 
4079a9bdb1eeSJiewei Ke 	if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ANA_CHANGE_MASK_BIT)) {
4080a9bdb1eeSJiewei Ke 		return 0;
4081a9bdb1eeSJiewei Ke 	}
4082a9bdb1eeSJiewei Ke 
408358da6223SShuhei Matsumoto 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
408458da6223SShuhei Matsumoto 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
408558da6223SShuhei Matsumoto 	event.bits.log_page_identifier = SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS;
408658da6223SShuhei Matsumoto 
408758da6223SShuhei Matsumoto 	return nvmf_ctrlr_async_event_notification(ctrlr, &event);
408858da6223SShuhei Matsumoto }
408958da6223SShuhei Matsumoto 
40904bfb557dSPhilipp Skadorov void
40919cb21ad6SSeth Howell nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr)
40924fa486a1SChangpeng Liu {
40934fa486a1SChangpeng Liu 	union spdk_nvme_async_event_completion event = {0};
40944fa486a1SChangpeng Liu 
40954fa486a1SChangpeng Liu 	if (!ctrlr->num_avail_log_pages) {
40964fa486a1SChangpeng Liu 		return;
40974fa486a1SChangpeng Liu 	}
4098a9bdb1eeSJiewei Ke 
4099a9bdb1eeSJiewei Ke 	if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL_MASK_BIT)) {
4100a9bdb1eeSJiewei Ke 		return;
4101a9bdb1eeSJiewei Ke 	}
4102a9bdb1eeSJiewei Ke 
41034fa486a1SChangpeng Liu 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_IO;
41044fa486a1SChangpeng Liu 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL;
41054fa486a1SChangpeng Liu 	event.bits.log_page_identifier = SPDK_NVME_LOG_RESERVATION_NOTIFICATION;
41064fa486a1SChangpeng Liu 
4107db96437eSShuhei Matsumoto 	nvmf_ctrlr_async_event_notification(ctrlr, &event);
41084fa486a1SChangpeng Liu }
41094fa486a1SChangpeng Liu 
411093364164SJacek Kalwas void
411193364164SJacek Kalwas nvmf_ctrlr_async_event_discovery_log_change_notice(void *ctx)
4112fce94287SMadhu Adav MJ {
4113fce94287SMadhu Adav MJ 	union spdk_nvme_async_event_completion event = {0};
411493364164SJacek Kalwas 	struct spdk_nvmf_ctrlr *ctrlr = ctx;
4115fce94287SMadhu Adav MJ 
4116fce94287SMadhu Adav MJ 	/* Users may disable the event notification manually, or
4117fce94287SMadhu Adav MJ 	 * it may not be enabled because the keep alive timeout
4118fce94287SMadhu Adav MJ 	 * was not set in the Connect command to the discovery controller.
4119fce94287SMadhu Adav MJ 	 */
4120fce94287SMadhu Adav MJ 	if (!ctrlr->feat.async_event_configuration.bits.discovery_log_change_notice) {
412193364164SJacek Kalwas 		return;
4122fce94287SMadhu Adav MJ 	}
4123fce94287SMadhu Adav MJ 
4124a9bdb1eeSJiewei Ke 	if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE_MASK_BIT)) {
412593364164SJacek Kalwas 		return;
4126a9bdb1eeSJiewei Ke 	}
4127a9bdb1eeSJiewei Ke 
4128fce94287SMadhu Adav MJ 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
4129fce94287SMadhu Adav MJ 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE;
4130fce94287SMadhu Adav MJ 	event.bits.log_page_identifier = SPDK_NVME_LOG_DISCOVERY;
4131fce94287SMadhu Adav MJ 
413293364164SJacek Kalwas 	nvmf_ctrlr_async_event_notification(ctrlr, &event);
4133fce94287SMadhu Adav MJ }
4134fce94287SMadhu Adav MJ 
413586ad145bSChangpeng Liu int
4136414ff9bcSSzulik, Maciej spdk_nvmf_ctrlr_async_event_error_event(struct spdk_nvmf_ctrlr *ctrlr,
4137414ff9bcSSzulik, Maciej 					enum spdk_nvme_async_event_info_error info)
413886ad145bSChangpeng Liu {
4139414ff9bcSSzulik, Maciej 	union spdk_nvme_async_event_completion event;
4140414ff9bcSSzulik, Maciej 
414186ad145bSChangpeng Liu 	if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ERROR_MASK_BIT)) {
414286ad145bSChangpeng Liu 		return 0;
414386ad145bSChangpeng Liu 	}
414486ad145bSChangpeng Liu 
4145414ff9bcSSzulik, Maciej 	if (info > SPDK_NVME_ASYNC_EVENT_FW_IMAGE_LOAD) {
414686ad145bSChangpeng Liu 		return 0;
414786ad145bSChangpeng Liu 	}
414886ad145bSChangpeng Liu 
4149414ff9bcSSzulik, Maciej 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_ERROR;
4150414ff9bcSSzulik, Maciej 	event.bits.log_page_identifier = SPDK_NVME_LOG_ERROR;
4151414ff9bcSSzulik, Maciej 	event.bits.async_event_info = info;
4152414ff9bcSSzulik, Maciej 
415386ad145bSChangpeng Liu 	return nvmf_ctrlr_async_event_notification(ctrlr, &event);
415486ad145bSChangpeng Liu }
415586ad145bSChangpeng Liu 
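/* Release any outstanding AER requests on an admin queue pair without completing them. */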
41564fa486a1SChangpeng Liu void
41579cb21ad6SSeth Howell nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair)
41584bee4e03SSeth Howell {
41594bee4e03SSeth Howell 	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
4160b7cc4dd7SJin Yu 	int i;
41614bee4e03SSeth Howell 
41626c23277dSPeng Lian 	if (ctrlr == NULL || !nvmf_qpair_is_admin_queue(qpair)) {
41634bee4e03SSeth Howell 		return;
41644bee4e03SSeth Howell 	}
41654bee4e03SSeth Howell 
4166fcc426bdSJacek Kalwas 	assert(spdk_get_thread() == ctrlr->thread);
4167fcc426bdSJacek Kalwas 
4168b7cc4dd7SJin Yu 	for (i = 0; i < ctrlr->nr_aer_reqs; i++) {
4169b7cc4dd7SJin Yu 		spdk_nvmf_request_free(ctrlr->aer_req[i]);
4170b7cc4dd7SJin Yu 		ctrlr->aer_req[i] = NULL;
41714bee4e03SSeth Howell 	}
4172b7cc4dd7SJin Yu 
4173b7cc4dd7SJin Yu 	ctrlr->nr_aer_reqs = 0;
41744bee4e03SSeth Howell }
41754bee4e03SSeth Howell 
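/* Complete all outstanding AER requests with ABORTED BY REQUEST status. */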
41764bee4e03SSeth Howell void
4177414ff9bcSSzulik, Maciej spdk_nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr)
41784bfb557dSPhilipp Skadorov {
417955c39cd0SShuhei Matsumoto 	struct spdk_nvmf_request *req;
4180b7cc4dd7SJin Yu 	int i;
4181b7cc4dd7SJin Yu 
4182fcc426bdSJacek Kalwas 	assert(spdk_get_thread() == ctrlr->thread);
4183fcc426bdSJacek Kalwas 
4184e6464f32SChangpeng Liu 	if (!ctrlr->nr_aer_reqs) {
4185e6464f32SChangpeng Liu 		return;
4186e6464f32SChangpeng Liu 	}
4187e6464f32SChangpeng Liu 
4188b7cc4dd7SJin Yu 	for (i = 0; i < ctrlr->nr_aer_reqs; i++) {
418955c39cd0SShuhei Matsumoto 		req = ctrlr->aer_req[i];
419055c39cd0SShuhei Matsumoto 
419155c39cd0SShuhei Matsumoto 		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
419255c39cd0SShuhei Matsumoto 		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
419355c39cd0SShuhei Matsumoto 		_nvmf_request_complete(req);
419455c39cd0SShuhei Matsumoto 
4195b7cc4dd7SJin Yu 		ctrlr->aer_req[i] = NULL;
41964bfb557dSPhilipp Skadorov 	}
41974bfb557dSPhilipp Skadorov 
4198b7cc4dd7SJin Yu 	ctrlr->nr_aer_reqs = 0;
41994bfb557dSPhilipp Skadorov }
42001b6b6cc4SBen Walker 
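/*
 * Runs on the controller's thread: append a reservation notification log page and
 * raise the corresponding async event. At most 255 pages are kept; beyond that,
 * only the last entry's log page count is refreshed and the new log is dropped.
 */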
420160253750SChangpeng Liu static void
420260253750SChangpeng Liu _nvmf_ctrlr_add_reservation_log(void *ctx)
420360253750SChangpeng Liu {
420460253750SChangpeng Liu 	struct spdk_nvmf_reservation_log *log = (struct spdk_nvmf_reservation_log *)ctx;
420560253750SChangpeng Liu 	struct spdk_nvmf_ctrlr *ctrlr = log->ctrlr;
420660253750SChangpeng Liu 
420760253750SChangpeng Liu 	ctrlr->log_page_count++;
420860253750SChangpeng Liu 
420960253750SChangpeng Liu 	/* Maximum number of queued log pages is 255 */
421060253750SChangpeng Liu 	if (ctrlr->num_avail_log_pages == 0xff) {
421160253750SChangpeng Liu 		struct spdk_nvmf_reservation_log *entry;
421260253750SChangpeng Liu 		entry = TAILQ_LAST(&ctrlr->log_head, log_page_head);
421360253750SChangpeng Liu 		entry->log.log_page_count = ctrlr->log_page_count;
421460253750SChangpeng Liu 		free(log);
421560253750SChangpeng Liu 		return;
421660253750SChangpeng Liu 	}
421760253750SChangpeng Liu 
421860253750SChangpeng Liu 	log->log.log_page_count = ctrlr->log_page_count;
421960253750SChangpeng Liu 	log->log.num_avail_log_pages = ctrlr->num_avail_log_pages++;
422060253750SChangpeng Liu 	TAILQ_INSERT_TAIL(&ctrlr->log_head, log, link);
42214fa486a1SChangpeng Liu 
42229cb21ad6SSeth Howell 	nvmf_ctrlr_async_event_reservation_notification(ctrlr);
422360253750SChangpeng Liu }
422460253750SChangpeng Liu 
422578bfb2a1SChangpeng Liu void
42269cb21ad6SSeth Howell nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
422778bfb2a1SChangpeng Liu 				  struct spdk_nvmf_ns *ns,
422878bfb2a1SChangpeng Liu 				  enum spdk_nvme_reservation_notification_log_page_type type)
422978bfb2a1SChangpeng Liu {
423078bfb2a1SChangpeng Liu 	struct spdk_nvmf_reservation_log *log;
423178bfb2a1SChangpeng Liu 
423278bfb2a1SChangpeng Liu 	switch (type) {
423378bfb2a1SChangpeng Liu 	case SPDK_NVME_RESERVATION_LOG_PAGE_EMPTY:
423478bfb2a1SChangpeng Liu 		return;
423578bfb2a1SChangpeng Liu 	case SPDK_NVME_REGISTRATION_PREEMPTED:
423678bfb2a1SChangpeng Liu 		if (ns->mask & SPDK_NVME_REGISTRATION_PREEMPTED_MASK) {
423778bfb2a1SChangpeng Liu 			return;
423878bfb2a1SChangpeng Liu 		}
423978bfb2a1SChangpeng Liu 		break;
424078bfb2a1SChangpeng Liu 	case SPDK_NVME_RESERVATION_RELEASED:
424178bfb2a1SChangpeng Liu 		if (ns->mask & SPDK_NVME_RESERVATION_RELEASED_MASK) {
424278bfb2a1SChangpeng Liu 			return;
424378bfb2a1SChangpeng Liu 		}
424478bfb2a1SChangpeng Liu 		break;
424578bfb2a1SChangpeng Liu 	case SPDK_NVME_RESERVATION_PREEMPTED:
424678bfb2a1SChangpeng Liu 		if (ns->mask & SPDK_NVME_RESERVATION_PREEMPTED_MASK) {
424778bfb2a1SChangpeng Liu 			return;
424878bfb2a1SChangpeng Liu 		}
424978bfb2a1SChangpeng Liu 		break;
425078bfb2a1SChangpeng Liu 	default:
425178bfb2a1SChangpeng Liu 		return;
425278bfb2a1SChangpeng Liu 	}
425378bfb2a1SChangpeng Liu 
425478bfb2a1SChangpeng Liu 	log = calloc(1, sizeof(*log));
425578bfb2a1SChangpeng Liu 	if (!log) {
425678bfb2a1SChangpeng Liu 		SPDK_ERRLOG("Alloc log page failed, ignore the log\n");
425778bfb2a1SChangpeng Liu 		return;
425878bfb2a1SChangpeng Liu 	}
425960253750SChangpeng Liu 	log->ctrlr = ctrlr;
426078bfb2a1SChangpeng Liu 	log->log.type = type;
426178bfb2a1SChangpeng Liu 	log->log.nsid = ns->nsid;
426260253750SChangpeng Liu 
426360253750SChangpeng Liu 	spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_add_reservation_log, log);
426478bfb2a1SChangpeng Liu }
426578bfb2a1SChangpeng Liu 
4266ca76e519SChangpeng Liu /* Check the subsystem poll group's namespace information data structure for a registrant matching this controller's host ID */
4267ca76e519SChangpeng Liu static bool
4268ca76e519SChangpeng Liu nvmf_ns_info_ctrlr_is_registrant(struct spdk_nvmf_subsystem_pg_ns_info *ns_info,
4269ca76e519SChangpeng Liu 				 struct spdk_nvmf_ctrlr *ctrlr)
4270ca76e519SChangpeng Liu {
4271ca76e519SChangpeng Liu 	uint32_t i;
4272ca76e519SChangpeng Liu 
4273ca76e519SChangpeng Liu 	for (i = 0; i < SPDK_NVMF_MAX_NUM_REGISTRANTS; i++) {
4274ca76e519SChangpeng Liu 		if (!spdk_uuid_compare(&ns_info->reg_hostid[i], &ctrlr->hostid)) {
4275ca76e519SChangpeng Liu 			return true;
4276ca76e519SChangpeng Liu 		}
4277ca76e519SChangpeng Liu 	}
4278ca76e519SChangpeng Liu 
4279ca76e519SChangpeng Liu 	return false;
4280ca76e519SChangpeng Liu }
4281ca76e519SChangpeng Liu 
4282ca76e519SChangpeng Liu /*
4283ca76e519SChangpeng Liu  * Check whether the NVMe command is permitted for the current controller (host).
4284ca76e519SChangpeng Liu  */
4285ca76e519SChangpeng Liu static int
4286ca76e519SChangpeng Liu nvmf_ns_reservation_request_check(struct spdk_nvmf_subsystem_pg_ns_info *ns_info,
4287ca76e519SChangpeng Liu 				  struct spdk_nvmf_ctrlr *ctrlr,
4288ca76e519SChangpeng Liu 				  struct spdk_nvmf_request *req)
4289ca76e519SChangpeng Liu {
4290ca76e519SChangpeng Liu 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
4291ca76e519SChangpeng Liu 	enum spdk_nvme_reservation_type rtype = ns_info->rtype;
4292ca76e519SChangpeng Liu 	uint8_t status = SPDK_NVME_SC_SUCCESS;
4293ca76e519SChangpeng Liu 	uint8_t racqa;
4294ca76e519SChangpeng Liu 	bool is_registrant;
4295ca76e519SChangpeng Liu 
4296ca76e519SChangpeng Liu 	/* No valid reservation */
4297ca76e519SChangpeng Liu 	if (!rtype) {
4298ca76e519SChangpeng Liu 		return 0;
4299ca76e519SChangpeng Liu 	}
4300ca76e519SChangpeng Liu 
4301ca76e519SChangpeng Liu 	is_registrant = nvmf_ns_info_ctrlr_is_registrant(ns_info, ctrlr);
4302ca76e519SChangpeng Liu 	/* All registrants type and current ctrlr is a valid registrant */
4303ca76e519SChangpeng Liu 	if ((rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS ||
4304ca76e519SChangpeng Liu 	     rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) && is_registrant) {
4305ca76e519SChangpeng Liu 		return 0;
4306ca76e519SChangpeng Liu 	} else if (!spdk_uuid_compare(&ns_info->holder_id, &ctrlr->hostid)) {
4307ca76e519SChangpeng Liu 		return 0;
4308ca76e519SChangpeng Liu 	}
4309ca76e519SChangpeng Liu 
4310ca76e519SChangpeng Liu 	/* Non-holder for current controller */
4311ca76e519SChangpeng Liu 	switch (cmd->opc) {
4312ca76e519SChangpeng Liu 	case SPDK_NVME_OPC_READ:
4313ca76e519SChangpeng Liu 	case SPDK_NVME_OPC_COMPARE:
4314ca76e519SChangpeng Liu 		if (rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
4315ca76e519SChangpeng Liu 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
4316ca76e519SChangpeng Liu 			goto exit;
4317ca76e519SChangpeng Liu 		}
4318ca76e519SChangpeng Liu 		if ((rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY ||
4319ca76e519SChangpeng Liu 		     rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) && !is_registrant) {
4320ca76e519SChangpeng Liu 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
4321ca76e519SChangpeng Liu 		}
4322ca76e519SChangpeng Liu 		break;
4323ca76e519SChangpeng Liu 	case SPDK_NVME_OPC_FLUSH:
4324ca76e519SChangpeng Liu 	case SPDK_NVME_OPC_WRITE:
4325ca76e519SChangpeng Liu 	case SPDK_NVME_OPC_WRITE_UNCORRECTABLE:
4326ca76e519SChangpeng Liu 	case SPDK_NVME_OPC_WRITE_ZEROES:
4327ca76e519SChangpeng Liu 	case SPDK_NVME_OPC_DATASET_MANAGEMENT:
4328ca76e519SChangpeng Liu 		if (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE ||
4329ca76e519SChangpeng Liu 		    rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
4330ca76e519SChangpeng Liu 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
4331ca76e519SChangpeng Liu 			goto exit;
4332ca76e519SChangpeng Liu 		}
4333ca76e519SChangpeng Liu 		if (!is_registrant) {
4334ca76e519SChangpeng Liu 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
4335ca76e519SChangpeng Liu 		}
4336ca76e519SChangpeng Liu 		break;
4337ca76e519SChangpeng Liu 	case SPDK_NVME_OPC_RESERVATION_ACQUIRE:
43381fea1fccSChangpeng Liu 		racqa = cmd->cdw10_bits.resv_acquire.racqa;
43395b54916bSChangpeng Liu 		if (racqa == SPDK_NVME_RESERVE_ACQUIRE) {
4340ca76e519SChangpeng Liu 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
4341ca76e519SChangpeng Liu 			goto exit;
4342ca76e519SChangpeng Liu 		}
4343ca76e519SChangpeng Liu 		if (!is_registrant) {
4344ca76e519SChangpeng Liu 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
4345ca76e519SChangpeng Liu 		}
4346ca76e519SChangpeng Liu 		break;
43475b54916bSChangpeng Liu 	case SPDK_NVME_OPC_RESERVATION_RELEASE:
43485b54916bSChangpeng Liu 		if (!is_registrant) {
43495b54916bSChangpeng Liu 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
43505b54916bSChangpeng Liu 		}
43515b54916bSChangpeng Liu 		break;
4352ca76e519SChangpeng Liu 	default:
4353ca76e519SChangpeng Liu 		break;
4354ca76e519SChangpeng Liu 	}
4355ca76e519SChangpeng Liu 
4356ca76e519SChangpeng Liu exit:
4357ca76e519SChangpeng Liu 	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4358ca76e519SChangpeng Liu 	req->rsp->nvme_cpl.status.sc = status;
4359ca76e519SChangpeng Liu 	if (status == SPDK_NVME_SC_RESERVATION_CONFLICT) {
4360ca76e519SChangpeng Liu 		return -EPERM;
4361ca76e519SChangpeng Liu 	}
4362ca76e519SChangpeng Liu 
4363ca76e519SChangpeng Liu 	return 0;
4364ca76e519SChangpeng Liu }
4365ca76e519SChangpeng Liu 
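/*
 * Handle the two halves of a fused Compare and Write. The first command (Compare)
 * is stashed on the queue pair; when the matching second command (Write) arrives,
 * both are submitted together via nvmf_bdev_ctrlr_compare_and_write_cmd(). Sequence
 * violations abort the stashed command and/or fail the current one with
 * SPDK_NVME_SC_ABORTED_MISSING_FUSED.
 */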
436687be077dSMaciej Szwed static int
4367198fd2ceSSeth Howell nvmf_ctrlr_process_io_fused_cmd(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
436887be077dSMaciej Szwed 				struct spdk_bdev_desc *desc, struct spdk_io_channel *ch)
43691b6b6cc4SBen Walker {
43701b6b6cc4SBen Walker 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
437187be077dSMaciej Szwed 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
437271beb568SMaciej Szwed 	struct spdk_nvmf_request *first_fused_req = req->qpair->first_fused_req;
437371beb568SMaciej Szwed 	int rc;
43741b6b6cc4SBen Walker 
437567c9c1c5STomasz Kulasek 	if (cmd->fuse == SPDK_NVME_CMD_FUSE_FIRST) {
437667c9c1c5STomasz Kulasek 		/* first fused operation (should be compare) */
437771beb568SMaciej Szwed 		if (first_fused_req != NULL) {
437871beb568SMaciej Szwed 			struct spdk_nvme_cpl *fused_response = &first_fused_req->rsp->nvme_cpl;
437967c9c1c5STomasz Kulasek 
438067c9c1c5STomasz Kulasek 			SPDK_ERRLOG("Wrong sequence of fused operations\n");
438171beb568SMaciej Szwed 
438267c9c1c5STomasz Kulasek 			/* abort req->qpair->first_fused_req and continue with the new fused command */
438367c9c1c5STomasz Kulasek 			fused_response->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
438467c9c1c5STomasz Kulasek 			fused_response->status.sct = SPDK_NVME_SCT_GENERIC;
4385db96437eSShuhei Matsumoto 			_nvmf_request_complete(first_fused_req);
438671beb568SMaciej Szwed 		} else if (cmd->opc != SPDK_NVME_OPC_COMPARE) {
438771beb568SMaciej Szwed 			SPDK_ERRLOG("Wrong op code of fused operations\n");
438871beb568SMaciej Szwed 			rsp->status.sct = SPDK_NVME_SCT_GENERIC;
438971beb568SMaciej Szwed 			rsp->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
439071beb568SMaciej Szwed 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
439167c9c1c5STomasz Kulasek 		}
439267c9c1c5STomasz Kulasek 
439367c9c1c5STomasz Kulasek 		req->qpair->first_fused_req = req;
439471beb568SMaciej Szwed 		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
439567c9c1c5STomasz Kulasek 	} else if (cmd->fuse == SPDK_NVME_CMD_FUSE_SECOND) {
439671beb568SMaciej Szwed 		/* second fused operation (should be write) */
439771beb568SMaciej Szwed 		if (first_fused_req == NULL) {
439867c9c1c5STomasz Kulasek 			SPDK_ERRLOG("Wrong sequence of fused operations\n");
439987be077dSMaciej Szwed 			rsp->status.sct = SPDK_NVME_SCT_GENERIC;
440087be077dSMaciej Szwed 			rsp->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
440167c9c1c5STomasz Kulasek 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
440271beb568SMaciej Szwed 		} else if (cmd->opc != SPDK_NVME_OPC_WRITE) {
440371beb568SMaciej Szwed 			struct spdk_nvme_cpl *fused_response = &first_fused_req->rsp->nvme_cpl;
440471beb568SMaciej Szwed 
440571beb568SMaciej Szwed 			SPDK_ERRLOG("Wrong op code of fused operations\n");
440671beb568SMaciej Szwed 
440771beb568SMaciej Szwed 			/* abort req->qpair->first_fused_req and fail the current command */
440871beb568SMaciej Szwed 			fused_response->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
440971beb568SMaciej Szwed 			fused_response->status.sct = SPDK_NVME_SCT_GENERIC;
4410db96437eSShuhei Matsumoto 			_nvmf_request_complete(first_fused_req);
441171beb568SMaciej Szwed 
441271beb568SMaciej Szwed 			rsp->status.sct = SPDK_NVME_SCT_GENERIC;
441371beb568SMaciej Szwed 			rsp->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
441471beb568SMaciej Szwed 			req->qpair->first_fused_req = NULL;
441571beb568SMaciej Szwed 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
441667c9c1c5STomasz Kulasek 		}
441767c9c1c5STomasz Kulasek 
441867c9c1c5STomasz Kulasek 		/* save the first command's request so its response can be generated later */
441971beb568SMaciej Szwed 		req->first_fused_req = first_fused_req;
4420a2c540e3SJacek Kalwas 		req->first_fused = true;
442167c9c1c5STomasz Kulasek 		req->qpair->first_fused_req = NULL;
4422f0ca01e1SSeth Howell 	} else {
4423f0ca01e1SSeth Howell 		SPDK_ERRLOG("Invalid fused command fuse field.\n");
4424f0ca01e1SSeth Howell 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
4425f0ca01e1SSeth Howell 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
4426f0ca01e1SSeth Howell 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
442767c9c1c5STomasz Kulasek 	}
442867c9c1c5STomasz Kulasek 
44299cb21ad6SSeth Howell 	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(bdev, desc, ch, req->first_fused_req, req);
443071beb568SMaciej Szwed 
443171beb568SMaciej Szwed 	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
443271beb568SMaciej Szwed 		if (spdk_nvme_cpl_is_error(rsp)) {
443371beb568SMaciej Szwed 			struct spdk_nvme_cpl *fused_response = &first_fused_req->rsp->nvme_cpl;
443471beb568SMaciej Szwed 
443571beb568SMaciej Szwed 			fused_response->status = rsp->status;
443671beb568SMaciej Szwed 			rsp->status.sct = SPDK_NVME_SCT_GENERIC;
443771beb568SMaciej Szwed 			rsp->status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
443871beb568SMaciej Szwed 			/* Complete first of fused commands. Second will be completed by upper layer */
4439db96437eSShuhei Matsumoto 			_nvmf_request_complete(first_fused_req);
444071beb568SMaciej Szwed 			req->first_fused_req = NULL;
4441a2c540e3SJacek Kalwas 			req->first_fused = false;
444271beb568SMaciej Szwed 		}
444371beb568SMaciej Szwed 	}
444471beb568SMaciej Szwed 
444571beb568SMaciej Szwed 	return rc;
444687be077dSMaciej Szwed }
444787be077dSMaciej Szwed 
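/*
 * Decide whether a request may use zero-copy buffers: the transport must have
 * zcopy enabled, the queue must be an I/O queue, the command must be a non-fused
 * READ or WRITE, and the target namespace's bdev must support zcopy.
 */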
44485818b42fSmatthewb bool
44495818b42fSmatthewb nvmf_ctrlr_use_zcopy(struct spdk_nvmf_request *req)
44505818b42fSmatthewb {
4451aa1d0398SKonrad Sztyber 	struct spdk_nvmf_transport *transport = req->qpair->transport;
44525818b42fSmatthewb 	struct spdk_nvmf_ns *ns;
44535818b42fSmatthewb 
44546631c2a8SKonrad Sztyber 	assert(req->zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
44555818b42fSmatthewb 
4456aa1d0398SKonrad Sztyber 	if (!transport->opts.zcopy) {
4457aa1d0398SKonrad Sztyber 		return false;
4458aa1d0398SKonrad Sztyber 	}
4459aa1d0398SKonrad Sztyber 
44605818b42fSmatthewb 	if (nvmf_qpair_is_admin_queue(req->qpair)) {
44615818b42fSmatthewb 		/* Admin queue */
44625818b42fSmatthewb 		return false;
44635818b42fSmatthewb 	}
44645818b42fSmatthewb 
44655818b42fSmatthewb 	if ((req->cmd->nvme_cmd.opc != SPDK_NVME_OPC_WRITE) &&
44665818b42fSmatthewb 	    (req->cmd->nvme_cmd.opc != SPDK_NVME_OPC_READ)) {
44675818b42fSmatthewb 		/* Not a READ or WRITE command */
44685818b42fSmatthewb 		return false;
44695818b42fSmatthewb 	}
44705818b42fSmatthewb 
44715818b42fSmatthewb 	if (req->cmd->nvme_cmd.fuse != SPDK_NVME_CMD_FUSE_NONE) {
44725818b42fSmatthewb 		/* Fused commands dont use zcopy buffers */
44735818b42fSmatthewb 		/* Fused commands don't use zcopy buffers */
44745818b42fSmatthewb 	}
44755818b42fSmatthewb 
447605859ef1SJim Harris 	ns = nvmf_ctrlr_get_ns(req->qpair->ctrlr, req->cmd->nvme_cmd.nsid);
44775818b42fSmatthewb 	if (ns == NULL || ns->bdev == NULL || !ns->zcopy) {
44785818b42fSmatthewb 		return false;
44795818b42fSmatthewb 	}
44805818b42fSmatthewb 
44815818b42fSmatthewb 	req->zcopy_phase = NVMF_ZCOPY_PHASE_INIT;
44825818b42fSmatthewb 	return true;
44835818b42fSmatthewb }
44845818b42fSmatthewb 
448592d7df1fSKonrad Sztyber void
44865818b42fSmatthewb spdk_nvmf_request_zcopy_start(struct spdk_nvmf_request *req)
44875818b42fSmatthewb {
448892d7df1fSKonrad Sztyber 	assert(req->zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
44895818b42fSmatthewb 
449092d7df1fSKonrad Sztyber 	/* Set iovcnt to be the maximum number of iovs that the ZCOPY can use */
44915818b42fSmatthewb 	req->iovcnt = NVMF_REQ_MAX_BUFFERS;
44925818b42fSmatthewb 
449392d7df1fSKonrad Sztyber 	spdk_nvmf_request_exec(req);
44945818b42fSmatthewb }
44955818b42fSmatthewb 
44967a374fbcSKonrad Sztyber void
44971d6adfb0SBen Walker spdk_nvmf_request_zcopy_end(struct spdk_nvmf_request *req, bool commit)
44985818b42fSmatthewb {
44997a374fbcSKonrad Sztyber 	assert(req->zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
45005818b42fSmatthewb 	req->zcopy_phase = NVMF_ZCOPY_PHASE_END_PENDING;
45017a374fbcSKonrad Sztyber 
45027a374fbcSKonrad Sztyber 	nvmf_bdev_ctrlr_zcopy_end(req, commit);
45035818b42fSmatthewb }
45045818b42fSmatthewb 
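/*
 * Dispatch an I/O command to the namespace's bdev. Checks controller enable state,
 * namespace validity, ANA state, and reservations before routing the command to
 * the appropriate bdev handler (or to the passthrough/zcopy paths).
 */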
450587be077dSMaciej Szwed int
45069cb21ad6SSeth Howell nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
450787be077dSMaciej Szwed {
450887be077dSMaciej Szwed 	uint32_t nsid;
450987be077dSMaciej Szwed 	struct spdk_nvmf_ns *ns;
451087be077dSMaciej Szwed 	struct spdk_bdev *bdev;
451187be077dSMaciej Szwed 	struct spdk_bdev_desc *desc;
451287be077dSMaciej Szwed 	struct spdk_io_channel *ch;
45132a3be8ddSKonrad Sztyber 	struct spdk_nvmf_qpair *qpair = req->qpair;
45142a3be8ddSKonrad Sztyber 	struct spdk_nvmf_poll_group *group = qpair->group;
45152a3be8ddSKonrad Sztyber 	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
451687be077dSMaciej Szwed 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
451787be077dSMaciej Szwed 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
451887be077dSMaciej Szwed 	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
451968f16817SShuhei Matsumoto 	enum spdk_nvme_ana_state ana_state;
452087be077dSMaciej Szwed 
452187be077dSMaciej Szwed 	/* pre-set response details for this command */
452287be077dSMaciej Szwed 	response->status.sc = SPDK_NVME_SC_SUCCESS;
452387be077dSMaciej Szwed 	nsid = cmd->nsid;
452487be077dSMaciej Szwed 
452533e23361SKonrad Sztyber 	assert(ctrlr != NULL);
45261b6b6cc4SBen Walker 	if (spdk_unlikely(ctrlr->vcprop.cc.bits.en != 1)) {
45271b6b6cc4SBen Walker 		SPDK_ERRLOG("I/O command sent to disabled controller\n");
45281b6b6cc4SBen Walker 		response->status.sct = SPDK_NVME_SCT_GENERIC;
45291b6b6cc4SBen Walker 		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
45301b6b6cc4SBen Walker 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
45311b6b6cc4SBen Walker 	}
45321b6b6cc4SBen Walker 
453305859ef1SJim Harris 	ns = nvmf_ctrlr_get_ns(ctrlr, nsid);
4534a6289d57SJacek Kalwas 	if (spdk_unlikely(ns == NULL || ns->bdev == NULL)) {
4535785d10b5SShuhei Matsumoto 		SPDK_DEBUGLOG(nvmf, "Unsuccessful query for nsid %u\n", cmd->nsid);
4536785d10b5SShuhei Matsumoto 		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
4537785d10b5SShuhei Matsumoto 		response->status.dnr = 1;
4538785d10b5SShuhei Matsumoto 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
4539785d10b5SShuhei Matsumoto 	}
4540785d10b5SShuhei Matsumoto 
45411c81d1afSKonrad Sztyber 	ana_state = nvmf_ctrlr_get_ana_state(ctrlr, ns->anagrpid);
454268f16817SShuhei Matsumoto 	if (spdk_unlikely(ana_state != SPDK_NVME_ANA_OPTIMIZED_STATE &&
454368f16817SShuhei Matsumoto 			  ana_state != SPDK_NVME_ANA_NON_OPTIMIZED_STATE)) {
45442172c432STomasz Zawadzki 		SPDK_DEBUGLOG(nvmf, "Fail I/O command due to ANA state %d\n",
454568f16817SShuhei Matsumoto 			      ana_state);
454668f16817SShuhei Matsumoto 		response->status.sct = SPDK_NVME_SCT_PATH;
454768f16817SShuhei Matsumoto 		response->status.sc = _nvme_ana_state_to_path_status(ana_state);
454868f16817SShuhei Matsumoto 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
454968f16817SShuhei Matsumoto 	}
455068f16817SShuhei Matsumoto 
4551c777cfa7SShuhei Matsumoto 	if (spdk_likely(ctrlr->listener != NULL)) {
45527c30df4eSJim Harris 		SPDK_DTRACE_PROBE3_TICKS(nvmf_request_io_exec_path, req,
4553c777cfa7SShuhei Matsumoto 					 ctrlr->listener->trid->traddr,
4554c777cfa7SShuhei Matsumoto 					 ctrlr->listener->trid->trsvcid);
4555c777cfa7SShuhei Matsumoto 	}
45561e2c5b1fSJim Harris 
45578fc9ac7bSJinYu 	/* scan-build falsely reporting dereference of null pointer */
45588fc9ac7bSJinYu 	assert(group != NULL && group->sgroups != NULL);
4559ca76e519SChangpeng Liu 	ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[nsid - 1];
4560ca76e519SChangpeng Liu 	if (nvmf_ns_reservation_request_check(ns_info, ctrlr, req)) {
45612172c432STomasz Zawadzki 		SPDK_DEBUGLOG(nvmf, "Reservation Conflict for nsid %u, opcode %u\n",
456268bb3995SChangpeng Liu 			      cmd->nsid, cmd->opc);
4563ca76e519SChangpeng Liu 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
4564ca76e519SChangpeng Liu 	}
4565ca76e519SChangpeng Liu 
45661b6b6cc4SBen Walker 	bdev = ns->bdev;
45671b6b6cc4SBen Walker 	desc = ns->desc;
4568ca76e519SChangpeng Liu 	ch = ns_info->channel;
456987be077dSMaciej Szwed 
457087be077dSMaciej Szwed 	if (spdk_unlikely(cmd->fuse & SPDK_NVME_CMD_FUSE_MASK)) {
4571198fd2ceSSeth Howell 		return nvmf_ctrlr_process_io_fused_cmd(req, bdev, desc, ch);
45722a3be8ddSKonrad Sztyber 	} else if (spdk_unlikely(qpair->first_fused_req != NULL)) {
45732a3be8ddSKonrad Sztyber 		struct spdk_nvme_cpl *fused_response = &qpair->first_fused_req->rsp->nvme_cpl;
457471beb568SMaciej Szwed 
4575c69768bdSMarcin Spiewak 		SPDK_ERRLOG("Second fused cmd expected - failing first one (cntlid:%u, qid:%u, opcode:0x%x)\n",
4576c69768bdSMarcin Spiewak 			    ctrlr->cntlid, qpair->qid,
4577d8804f6bSMarcin Spiewak 			    req->qpair->first_fused_req->cmd->nvmf_cmd.opcode);
457871beb568SMaciej Szwed 
45792a3be8ddSKonrad Sztyber 		/* abort qpair->first_fused_req and continue with the new command */
458071beb568SMaciej Szwed 		fused_response->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
458171beb568SMaciej Szwed 		fused_response->status.sct = SPDK_NVME_SCT_GENERIC;
45822a3be8ddSKonrad Sztyber 		_nvmf_request_complete(qpair->first_fused_req);
45832a3be8ddSKonrad Sztyber 		qpair->first_fused_req = NULL;
458487be077dSMaciej Szwed 	}
458587be077dSMaciej Szwed 
458660241941SKarl Bonde Torp 	if (ctrlr->subsys->passthrough) {
4587f220d590SKonrad Sztyber 		assert(ns->passthru_nsid > 0);
4588f220d590SKonrad Sztyber 		req->cmd->nvme_cmd.nsid = ns->passthru_nsid;
458960241941SKarl Bonde Torp 
459060241941SKarl Bonde Torp 		return nvmf_bdev_ctrlr_nvme_passthru_io(bdev, desc, ch, req);
459160241941SKarl Bonde Torp 	}
459260241941SKarl Bonde Torp 
459371cbc229SKonrad Sztyber 	if (spdk_nvmf_request_using_zcopy(req)) {
459471cbc229SKonrad Sztyber 		assert(req->zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
45950e09df57SKonrad Sztyber 		return nvmf_bdev_ctrlr_zcopy_start(bdev, desc, ch, req);
459671cbc229SKonrad Sztyber 	} else {
45971b6b6cc4SBen Walker 		switch (cmd->opc) {
45981b6b6cc4SBen Walker 		case SPDK_NVME_OPC_READ:
45999cb21ad6SSeth Howell 			return nvmf_bdev_ctrlr_read_cmd(bdev, desc, ch, req);
46001b6b6cc4SBen Walker 		case SPDK_NVME_OPC_WRITE:
46019cb21ad6SSeth Howell 			return nvmf_bdev_ctrlr_write_cmd(bdev, desc, ch, req);
4602b5e08f54SKonrad Sztyber 		case SPDK_NVME_OPC_FLUSH:
4603b5e08f54SKonrad Sztyber 			return nvmf_bdev_ctrlr_flush_cmd(bdev, desc, ch, req);
4604941d9e7aSMaciej Szwed 		case SPDK_NVME_OPC_COMPARE:
46057d19bf23SKonrad Sztyber 			if (spdk_unlikely(!ctrlr->cdata.oncs.compare)) {
46067d19bf23SKonrad Sztyber 				goto invalid_opcode;
46077d19bf23SKonrad Sztyber 			}
46089cb21ad6SSeth Howell 			return nvmf_bdev_ctrlr_compare_cmd(bdev, desc, ch, req);
46091b6b6cc4SBen Walker 		case SPDK_NVME_OPC_WRITE_ZEROES:
46107d19bf23SKonrad Sztyber 			if (spdk_unlikely(!ctrlr->cdata.oncs.write_zeroes)) {
46117d19bf23SKonrad Sztyber 				goto invalid_opcode;
46127d19bf23SKonrad Sztyber 			}
46139cb21ad6SSeth Howell 			return nvmf_bdev_ctrlr_write_zeroes_cmd(bdev, desc, ch, req);
46141b6b6cc4SBen Walker 		case SPDK_NVME_OPC_DATASET_MANAGEMENT:
46157d19bf23SKonrad Sztyber 			if (spdk_unlikely(!ctrlr->cdata.oncs.dsm)) {
46167d19bf23SKonrad Sztyber 				goto invalid_opcode;
46177d19bf23SKonrad Sztyber 			}
46189cb21ad6SSeth Howell 			return nvmf_bdev_ctrlr_dsm_cmd(bdev, desc, ch, req);
4619bc1d0b91SChangpeng Liu 		case SPDK_NVME_OPC_RESERVATION_REGISTER:
462071ac18d1SChangpeng Liu 		case SPDK_NVME_OPC_RESERVATION_ACQUIRE:
462184ee3a62SChangpeng Liu 		case SPDK_NVME_OPC_RESERVATION_RELEASE:
46224b55682eSChangpeng Liu 		case SPDK_NVME_OPC_RESERVATION_REPORT:
46237d19bf23SKonrad Sztyber 			if (spdk_unlikely(!ctrlr->cdata.oncs.reservations)) {
46247d19bf23SKonrad Sztyber 				goto invalid_opcode;
46257d19bf23SKonrad Sztyber 			}
46269cb21ad6SSeth Howell 			spdk_thread_send_msg(ctrlr->subsys->thread, nvmf_ns_reservation_request, req);
4627bc1d0b91SChangpeng Liu 			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
46288305e49bSEvgeniy Kochetov 		case SPDK_NVME_OPC_COPY:
46297d19bf23SKonrad Sztyber 			if (spdk_unlikely(!ctrlr->cdata.oncs.copy)) {
46307d19bf23SKonrad Sztyber 				goto invalid_opcode;
46317d19bf23SKonrad Sztyber 			}
46328305e49bSEvgeniy Kochetov 			return nvmf_bdev_ctrlr_copy_cmd(bdev, desc, ch, req);
46331b6b6cc4SBen Walker 		default:
463465335336SKonrad Sztyber 			if (spdk_unlikely(qpair->transport->opts.disable_command_passthru)) {
463565335336SKonrad Sztyber 				goto invalid_opcode;
463665335336SKonrad Sztyber 			}
4637f220d590SKonrad Sztyber 			if (ns->passthru_nsid) {
4638f220d590SKonrad Sztyber 				req->cmd->nvme_cmd.nsid = ns->passthru_nsid;
4639c156f3e1SKarl Bonde Torp 			}
46409cb21ad6SSeth Howell 			return nvmf_bdev_ctrlr_nvme_passthru_io(bdev, desc, ch, req);
46411b6b6cc4SBen Walker 		}
46421b6b6cc4SBen Walker 	}
464365335336SKonrad Sztyber invalid_opcode:
464465335336SKonrad Sztyber 	SPDK_INFOLOG(nvmf, "Unsupported IO opcode 0x%x\n", cmd->opc);
464565335336SKonrad Sztyber 	response->status.sct = SPDK_NVME_SCT_GENERIC;
464665335336SKonrad Sztyber 	response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
464765335336SKonrad Sztyber 	response->status.dnr = 1;
464865335336SKonrad Sztyber 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
464971cbc229SKonrad Sztyber }
4650a4d666fdSBen Walker 
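/* If the qpair is deactivating and has no outstanding requests, invoke its state callback. */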
4651a4d666fdSBen Walker static void
4652198fd2ceSSeth Howell nvmf_qpair_request_cleanup(struct spdk_nvmf_qpair *qpair)
4653a4d666fdSBen Walker {
4654a6289d57SJacek Kalwas 	if (spdk_unlikely(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING)) {
4655a4d666fdSBen Walker 		assert(qpair->state_cb != NULL);
4656a4d666fdSBen Walker 
4657a4d666fdSBen Walker 		if (TAILQ_EMPTY(&qpair->outstanding)) {
4658a4d666fdSBen Walker 			qpair->state_cb(qpair->state_cb_arg, 0);
4659a4d666fdSBen Walker 		}
4660a4d666fdSBen Walker 	}
4661a4d666fdSBen Walker }
4662a4d666fdSBen Walker 
4663a4d666fdSBen Walker int
4664a4d666fdSBen Walker spdk_nvmf_request_free(struct spdk_nvmf_request *req)
4665a4d666fdSBen Walker {
4666a4d666fdSBen Walker 	struct spdk_nvmf_qpair *qpair = req->qpair;
4667a4d666fdSBen Walker 
4668a4d666fdSBen Walker 	TAILQ_REMOVE(&qpair->outstanding, req, link);
4669a6289d57SJacek Kalwas 	if (spdk_unlikely(nvmf_transport_req_free(req))) {
4670a4d666fdSBen Walker 		SPDK_ERRLOG("Unable to free transport level request resources.\n");
4671a4d666fdSBen Walker 	}
4672a4d666fdSBen Walker 
4673198fd2ceSSeth Howell 	nvmf_qpair_request_cleanup(qpair);
4674a4d666fdSBen Walker 
4675a4d666fdSBen Walker 	return 0;
4676a4d666fdSBen Walker }
4677a4d666fdSBen Walker 
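/*
 * Complete a request on its poll group thread: fill in the completion fields,
 * advance the zcopy phase, update outstanding I/O accounting, and finish a pending
 * subsystem pause once the last outstanding request drains.
 */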
4678db96437eSShuhei Matsumoto static void
4679db96437eSShuhei Matsumoto _nvmf_request_complete(void *ctx)
4680a4d666fdSBen Walker {
4681db96437eSShuhei Matsumoto 	struct spdk_nvmf_request *req = ctx;
4682a4d666fdSBen Walker 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
4683a4d666fdSBen Walker 	struct spdk_nvmf_qpair *qpair;
46848fc9ac7bSJinYu 	struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;
4685312a9d60SBen Walker 	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
46861c5444d6SAlexey Marchuk 	bool is_aer = false;
4687312a9d60SBen Walker 	uint32_t nsid;
4688312a9d60SBen Walker 	bool paused;
4689b70b16acSChangpeng Liu 	uint8_t opcode;
4690a4d666fdSBen Walker 
4691a4d666fdSBen Walker 	rsp->sqid = 0;
4692a4d666fdSBen Walker 	rsp->status.p = 0;
4693a4d666fdSBen Walker 	rsp->cid = req->cmd->nvme_cmd.cid;
4694b70b16acSChangpeng Liu 	nsid = req->cmd->nvme_cmd.nsid;
4695b70b16acSChangpeng Liu 	opcode = req->cmd->nvmf_cmd.opcode;
4696a4d666fdSBen Walker 
4697a4d666fdSBen Walker 	qpair = req->qpair;
4698a6289d57SJacek Kalwas 	if (spdk_likely(qpair->ctrlr)) {
46998fc9ac7bSJinYu 		sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
4700ed4b7d12SJacek Kalwas 		assert(sgroup != NULL);
47019396422bSJin Yu 		is_aer = req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
4702e39512ecSJim Harris 		if (spdk_likely(qpair->qid != 0)) {
4703e39512ecSJim Harris 			qpair->group->stat.completed_nvme_io++;
4704e39512ecSJim Harris 		}
47055cc56599Syupeng 
47065cc56599Syupeng 		/*
47075cc56599Syupeng 		 * Set the crd value.
47085cc56599Syupeng 		 * If the IO has any error, and dnr (DoNotRetry) is not 1,
47095cc56599Syupeng 		 * and ACRE is enabled, we will set the crd to 1 to select the first CRDT.
47105cc56599Syupeng 		 */
4711a6289d57SJacek Kalwas 		if (spdk_unlikely(spdk_nvme_cpl_is_error(rsp) &&
47125cc56599Syupeng 				  rsp->status.dnr == 0 &&
4713a6289d57SJacek Kalwas 				  qpair->ctrlr->acre_enabled)) {
47145cc56599Syupeng 			rsp->status.crd = 1;
47155cc56599Syupeng 		}
47161c5444d6SAlexey Marchuk 	} else if (spdk_unlikely(nvmf_request_is_fabric_connect(req))) {
47171c5444d6SAlexey Marchuk 		sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
47188fc9ac7bSJinYu 	}
4719a4d666fdSBen Walker 
472081717da1SJacek Kalwas 	if (SPDK_DEBUGLOG_FLAG_ENABLED("nvmf")) {
472181717da1SJacek Kalwas 		spdk_nvme_print_completion(qpair->qid, rsp);
472281717da1SJacek Kalwas 	}
4723a4d666fdSBen Walker 
47245818b42fSmatthewb 	switch (req->zcopy_phase) {
47255818b42fSmatthewb 	case NVMF_ZCOPY_PHASE_NONE:
4726a4d666fdSBen Walker 		TAILQ_REMOVE(&qpair->outstanding, req, link);
47275818b42fSmatthewb 		break;
47285818b42fSmatthewb 	case NVMF_ZCOPY_PHASE_INIT:
47295818b42fSmatthewb 		if (spdk_unlikely(spdk_nvme_cpl_is_error(rsp))) {
47305818b42fSmatthewb 			req->zcopy_phase = NVMF_ZCOPY_PHASE_INIT_FAILED;
47315818b42fSmatthewb 			TAILQ_REMOVE(&qpair->outstanding, req, link);
47325818b42fSmatthewb 		} else {
47335818b42fSmatthewb 			req->zcopy_phase = NVMF_ZCOPY_PHASE_EXECUTE;
47345818b42fSmatthewb 		}
47355818b42fSmatthewb 		break;
47365818b42fSmatthewb 	case NVMF_ZCOPY_PHASE_EXECUTE:
47375818b42fSmatthewb 		break;
47385818b42fSmatthewb 	case NVMF_ZCOPY_PHASE_END_PENDING:
47395818b42fSmatthewb 		TAILQ_REMOVE(&qpair->outstanding, req, link);
47405818b42fSmatthewb 		req->zcopy_phase = NVMF_ZCOPY_PHASE_COMPLETE;
47415818b42fSmatthewb 		break;
47425818b42fSmatthewb 	default:
47435818b42fSmatthewb 		SPDK_ERRLOG("Invalid ZCOPY phase %u\n", req->zcopy_phase);
47445818b42fSmatthewb 		break;
47455818b42fSmatthewb 	}
47465818b42fSmatthewb 
4747a6289d57SJacek Kalwas 	if (spdk_unlikely(nvmf_transport_req_complete(req))) {
4748a4d666fdSBen Walker 		SPDK_ERRLOG("Transport request completion error!\n");
4749a4d666fdSBen Walker 	}
4750a4d666fdSBen Walker 
47511c5444d6SAlexey Marchuk 	/* AER cmd is an exception */
4752a6289d57SJacek Kalwas 	if (spdk_likely(sgroup && !is_aer)) {
4753b70b16acSChangpeng Liu 		if (spdk_unlikely(opcode == SPDK_NVME_OPC_FABRIC ||
4754312a9d60SBen Walker 				  nvmf_qpair_is_admin_queue(qpair))) {
4755312a9d60SBen Walker 			assert(sgroup->mgmt_io_outstanding > 0);
4756312a9d60SBen Walker 			sgroup->mgmt_io_outstanding--;
4757312a9d60SBen Walker 		} else {
4758d39038e1SKonrad Sztyber 			if (req->zcopy_phase == NVMF_ZCOPY_PHASE_NONE ||
4759d39038e1SKonrad Sztyber 			    req->zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE ||
4760d39038e1SKonrad Sztyber 			    req->zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED) {
47615818b42fSmatthewb 				/* End of request */
47625818b42fSmatthewb 
4763312a9d60SBen Walker 				/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
4764312a9d60SBen Walker 				if (spdk_likely(nsid - 1 < sgroup->num_ns)) {
4765312a9d60SBen Walker 					sgroup->ns_info[nsid - 1].io_outstanding--;
4766312a9d60SBen Walker 				}
4767312a9d60SBen Walker 			}
47685818b42fSmatthewb 		}
4769312a9d60SBen Walker 
4770312a9d60SBen Walker 		if (spdk_unlikely(sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSING &&
4771312a9d60SBen Walker 				  sgroup->mgmt_io_outstanding == 0)) {
4772312a9d60SBen Walker 			paused = true;
4773312a9d60SBen Walker 			for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
4774312a9d60SBen Walker 				ns_info = &sgroup->ns_info[nsid];
4775312a9d60SBen Walker 
4776312a9d60SBen Walker 				if (ns_info->state == SPDK_NVMF_SUBSYSTEM_PAUSING &&
4777312a9d60SBen Walker 				    ns_info->io_outstanding > 0) {
4778312a9d60SBen Walker 					paused = false;
4779312a9d60SBen Walker 					break;
4780312a9d60SBen Walker 				}
4781312a9d60SBen Walker 			}
4782312a9d60SBen Walker 
4783312a9d60SBen Walker 			if (paused) {
47848fc9ac7bSJinYu 				sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
47858fc9ac7bSJinYu 				sgroup->cb_fn(sgroup->cb_arg, 0);
47863e967963SJim Harris 				sgroup->cb_fn = NULL;
47873e967963SJim Harris 				sgroup->cb_arg = NULL;
47888fc9ac7bSJinYu 			}
47898fc9ac7bSJinYu 		}
47908fc9ac7bSJinYu 
4791312a9d60SBen Walker 	}
4792312a9d60SBen Walker 
4793198fd2ceSSeth Howell 	nvmf_qpair_request_cleanup(qpair);
4794db96437eSShuhei Matsumoto }
4795db96437eSShuhei Matsumoto 
4796db96437eSShuhei Matsumoto int
4797db96437eSShuhei Matsumoto spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
4798db96437eSShuhei Matsumoto {
4799db96437eSShuhei Matsumoto 	struct spdk_nvmf_qpair *qpair = req->qpair;
4800db96437eSShuhei Matsumoto 
48015f270928SJohn Levon 	spdk_thread_exec_msg(qpair->group->thread, _nvmf_request_complete, req);
4802a4d666fdSBen Walker 
4803a4d666fdSBen Walker 	return 0;
4804a4d666fdSBen Walker }
4805a4d666fdSBen Walker 
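/*
 * Gate a request on the state of its subsystem and namespace. Requests to a paused
 * subsystem or namespace are queued for later; requests to an invalid namespace or
 * one whose channel is not yet ready fail immediately. Returns true (and bumps the
 * outstanding counters) when the request may proceed.
 */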
48068dd1cd21SBen Walker static bool
48078dd1cd21SBen Walker nvmf_check_subsystem_active(struct spdk_nvmf_request *req)
4808a4d666fdSBen Walker {
4809a4d666fdSBen Walker 	struct spdk_nvmf_qpair *qpair = req->qpair;
48108fc9ac7bSJinYu 	struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;
4811312a9d60SBen Walker 	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
4812312a9d60SBen Walker 	uint32_t nsid;
4813a4d666fdSBen Walker 
4814a6289d57SJacek Kalwas 	if (spdk_likely(qpair->ctrlr)) {
48158fc9ac7bSJinYu 		sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
4816ed4b7d12SJacek Kalwas 		assert(sgroup != NULL);
48171c5444d6SAlexey Marchuk 	} else if (spdk_unlikely(nvmf_request_is_fabric_connect(req))) {
48181c5444d6SAlexey Marchuk 		sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
48198fc9ac7bSJinYu 	}
48208fc9ac7bSJinYu 
4821b10e3050SBen Walker 	/* Check if the subsystem is paused (if there is a subsystem) */
4822c3a650d5SKonrad Sztyber 	if (spdk_unlikely(sgroup == NULL)) {
4823c3a650d5SKonrad Sztyber 		return true;
4824c3a650d5SKonrad Sztyber 	}
4825c3a650d5SKonrad Sztyber 
4826312a9d60SBen Walker 	if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC ||
4827312a9d60SBen Walker 			  nvmf_qpair_is_admin_queue(qpair))) {
4828b10e3050SBen Walker 		if (sgroup->state != SPDK_NVMF_SUBSYSTEM_ACTIVE) {
4829b10e3050SBen Walker 			/* The subsystem is not currently active. Queue this request. */
4830b10e3050SBen Walker 			TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
48315818b42fSmatthewb 			return false;
4832b10e3050SBen Walker 		}
4833312a9d60SBen Walker 		sgroup->mgmt_io_outstanding++;
4834312a9d60SBen Walker 	} else {
4835312a9d60SBen Walker 		nsid = req->cmd->nvme_cmd.nsid;
4836312a9d60SBen Walker 
4837312a9d60SBen Walker 		/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
4838312a9d60SBen Walker 		if (spdk_unlikely(nsid - 1 >= sgroup->num_ns)) {
4839312a9d60SBen Walker 			req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4840312a9d60SBen Walker 			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
4841312a9d60SBen Walker 			req->rsp->nvme_cpl.status.dnr = 1;
484227223168SKonrad Sztyber 			TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
4843312a9d60SBen Walker 			_nvmf_request_complete(req);
48445818b42fSmatthewb 			return false;
4845312a9d60SBen Walker 		}
4846312a9d60SBen Walker 
4847312a9d60SBen Walker 		ns_info = &sgroup->ns_info[nsid - 1];
4848a6289d57SJacek Kalwas 		if (spdk_unlikely(ns_info->channel == NULL)) {
4849312a9d60SBen Walker 			/* This can happen if the host sends I/O to a namespace that is
4850312a9d60SBen Walker 			 * in the process of being added, but before the full addition
4851312a9d60SBen Walker 			 * process is complete.  Report invalid namespace in that case.
4852312a9d60SBen Walker 			 */
4853312a9d60SBen Walker 			req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4854312a9d60SBen Walker 			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
4855312a9d60SBen Walker 			req->rsp->nvme_cpl.status.dnr = 1;
485627223168SKonrad Sztyber 			TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
4857312a9d60SBen Walker 			ns_info->io_outstanding++;
4858312a9d60SBen Walker 			_nvmf_request_complete(req);
48595818b42fSmatthewb 			return false;
4860312a9d60SBen Walker 		}
4861312a9d60SBen Walker 
4862a6289d57SJacek Kalwas 		if (spdk_unlikely(ns_info->state != SPDK_NVMF_SUBSYSTEM_ACTIVE)) {
4863312a9d60SBen Walker 			/* The namespace is not currently active. Queue this request. */
4864312a9d60SBen Walker 			TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
48655818b42fSmatthewb 			return false;
4866312a9d60SBen Walker 		}
48675818b42fSmatthewb 
4868312a9d60SBen Walker 		ns_info->io_outstanding++;
4869312a9d60SBen Walker 	}
4870b10e3050SBen Walker 
487133e23361SKonrad Sztyber 	return true;
487233e23361SKonrad Sztyber }
487333e23361SKonrad Sztyber 
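/*
 * Verify that the queue pair state allows this command: only Connect is accepted
 * while connecting, and only Authentication Send/Receive while authenticating.
 * Anything else is completed with COMMAND SEQUENCE ERROR or AUTH REQUIRED status.
 */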
487433e23361SKonrad Sztyber static bool
487533e23361SKonrad Sztyber nvmf_check_qpair_active(struct spdk_nvmf_request *req)
487633e23361SKonrad Sztyber {
487733e23361SKonrad Sztyber 	struct spdk_nvmf_qpair *qpair = req->qpair;
48780a6bb8caSKonrad Sztyber 	int sc, sct;
487933e23361SKonrad Sztyber 
488033e23361SKonrad Sztyber 	if (spdk_likely(qpair->state == SPDK_NVMF_QPAIR_ENABLED)) {
488133e23361SKonrad Sztyber 		return true;
488233e23361SKonrad Sztyber 	}
488333e23361SKonrad Sztyber 
48840a6bb8caSKonrad Sztyber 	sct = SPDK_NVME_SCT_GENERIC;
48850a6bb8caSKonrad Sztyber 	sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
48860a6bb8caSKonrad Sztyber 
488733e23361SKonrad Sztyber 	switch (qpair->state) {
488833e23361SKonrad Sztyber 	case SPDK_NVMF_QPAIR_CONNECTING:
488933e23361SKonrad Sztyber 		if (req->cmd->nvmf_cmd.opcode != SPDK_NVME_OPC_FABRIC) {
489033e23361SKonrad Sztyber 			SPDK_ERRLOG("Received command 0x%x on qid %u before CONNECT\n",
489133e23361SKonrad Sztyber 				    req->cmd->nvmf_cmd.opcode, qpair->qid);
489233e23361SKonrad Sztyber 			break;
489333e23361SKonrad Sztyber 		}
489433e23361SKonrad Sztyber 		if (req->cmd->nvmf_cmd.fctype != SPDK_NVMF_FABRIC_COMMAND_CONNECT) {
489533e23361SKonrad Sztyber 			SPDK_ERRLOG("Received fctype 0x%x on qid %u before CONNECT\n",
489633e23361SKonrad Sztyber 				    req->cmd->nvmf_cmd.fctype, qpair->qid);
489733e23361SKonrad Sztyber 			break;
489833e23361SKonrad Sztyber 		}
489933e23361SKonrad Sztyber 		return true;
49000a6bb8caSKonrad Sztyber 	case SPDK_NVMF_QPAIR_AUTHENTICATING:
49010a6bb8caSKonrad Sztyber 		sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
49020a6bb8caSKonrad Sztyber 		sc = SPDK_NVMF_FABRIC_SC_AUTH_REQUIRED;
49030a6bb8caSKonrad Sztyber 		if (req->cmd->nvmf_cmd.opcode != SPDK_NVME_OPC_FABRIC) {
49040a6bb8caSKonrad Sztyber 			SPDK_ERRLOG("Received command 0x%x on qid %u before authentication\n",
49050a6bb8caSKonrad Sztyber 				    req->cmd->nvmf_cmd.opcode, qpair->qid);
49060a6bb8caSKonrad Sztyber 			break;
49070a6bb8caSKonrad Sztyber 		}
49080a6bb8caSKonrad Sztyber 		if (req->cmd->nvmf_cmd.fctype != SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND &&
49090a6bb8caSKonrad Sztyber 		    req->cmd->nvmf_cmd.fctype != SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV) {
49100a6bb8caSKonrad Sztyber 			SPDK_ERRLOG("Received fctype 0x%x on qid %u before authentication\n",
49110a6bb8caSKonrad Sztyber 				    req->cmd->nvmf_cmd.fctype, qpair->qid);
49120a6bb8caSKonrad Sztyber 			break;
49130a6bb8caSKonrad Sztyber 		}
49140a6bb8caSKonrad Sztyber 		return true;
491533e23361SKonrad Sztyber 	default:
491633e23361SKonrad Sztyber 		SPDK_ERRLOG("Received command 0x%x on qid %u in state %d\n",
491733e23361SKonrad Sztyber 			    req->cmd->nvmf_cmd.opcode, qpair->qid, qpair->state);
491833e23361SKonrad Sztyber 		break;
491933e23361SKonrad Sztyber 	}
492033e23361SKonrad Sztyber 
49210a6bb8caSKonrad Sztyber 	req->rsp->nvme_cpl.status.sct = sct;
49220a6bb8caSKonrad Sztyber 	req->rsp->nvme_cpl.status.sc = sc;
492327223168SKonrad Sztyber 	TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
4924db96437eSShuhei Matsumoto 	_nvmf_request_complete(req);
49255818b42fSmatthewb 
492633e23361SKonrad Sztyber 	return false;
49275818b42fSmatthewb }
49285818b42fSmatthewb 
49295818b42fSmatthewb void
49305818b42fSmatthewb spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
49315818b42fSmatthewb {
49325818b42fSmatthewb 	struct spdk_nvmf_qpair *qpair = req->qpair;
49335818b42fSmatthewb 	enum spdk_nvmf_request_exec_status status;
49345818b42fSmatthewb 
4935a6289d57SJacek Kalwas 	if (spdk_unlikely(!nvmf_check_subsystem_active(req))) {
4936a4d666fdSBen Walker 		return;
4937a4d666fdSBen Walker 	}
493833e23361SKonrad Sztyber 	if (spdk_unlikely(!nvmf_check_qpair_active(req))) {
493933e23361SKonrad Sztyber 		return;
494033e23361SKonrad Sztyber 	}
4941a4d666fdSBen Walker 
4942c642e8e2SBen Walker 	if (SPDK_DEBUGLOG_FLAG_ENABLED("nvmf")) {
4943c642e8e2SBen Walker 		spdk_nvme_print_command(qpair->qid, &req->cmd->nvme_cmd);
4944c642e8e2SBen Walker 	}
4945c642e8e2SBen Walker 
4946c642e8e2SBen Walker 	/* Place the request on the outstanding list so we can keep track of it */
494727223168SKonrad Sztyber 	TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
4948c642e8e2SBen Walker 
4949289eef51SKonrad Sztyber 	if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC)) {
4950c642e8e2SBen Walker 		status = nvmf_ctrlr_process_fabrics_cmd(req);
4951c642e8e2SBen Walker 	} else if (spdk_unlikely(nvmf_qpair_is_admin_queue(qpair))) {
4952c642e8e2SBen Walker 		status = nvmf_ctrlr_process_admin_cmd(req);
4953c642e8e2SBen Walker 	} else {
4954c642e8e2SBen Walker 		status = nvmf_ctrlr_process_io_cmd(req);
4955c642e8e2SBen Walker 	}
4956c642e8e2SBen Walker 
4957c642e8e2SBen Walker 	if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
4958c642e8e2SBen Walker 		_nvmf_request_complete(req);
4959c642e8e2SBen Walker 	}
4960a4d666fdSBen Walker }
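
/*
 * Illustrative sketch (hypothetical transport-side code, not part of this
 * file's control flow): once a command capsule and any in-capsule data are
 * attached to a request, the transport hands it off and must not touch it
 * again until its req_complete callback fires:
 *
 *	spdk_nvmf_request_exec(req);
 *
 * Whether the command was completed inline or asynchronously, the response is
 * always reported back through the transport's req_complete callback.
 */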
4961ddb680ebSShuhei Matsumoto 
4962ddb680ebSShuhei Matsumoto static bool
4963198fd2ceSSeth Howell nvmf_ctrlr_get_dif_ctx(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_cmd *cmd,
4964ddb680ebSShuhei Matsumoto 		       struct spdk_dif_ctx *dif_ctx)
4965ddb680ebSShuhei Matsumoto {
4966ddb680ebSShuhei Matsumoto 	struct spdk_nvmf_ns *ns;
4967b09de013SShuhei Matsumoto 	struct spdk_bdev_desc *desc;
4968ddb680ebSShuhei Matsumoto 
4969ddb680ebSShuhei Matsumoto 	if (ctrlr == NULL || cmd == NULL) {
4970ddb680ebSShuhei Matsumoto 		return false;
4971ddb680ebSShuhei Matsumoto 	}
4972ddb680ebSShuhei Matsumoto 
497305859ef1SJim Harris 	ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid);
4974ddb680ebSShuhei Matsumoto 	if (ns == NULL || ns->bdev == NULL) {
4975ddb680ebSShuhei Matsumoto 		return false;
4976ddb680ebSShuhei Matsumoto 	}
4977ddb680ebSShuhei Matsumoto 
4978b09de013SShuhei Matsumoto 	desc = ns->desc;
4979ddb680ebSShuhei Matsumoto 
4980ddb680ebSShuhei Matsumoto 	switch (cmd->opc) {
4981ddb680ebSShuhei Matsumoto 	case SPDK_NVME_OPC_READ:
4982ddb680ebSShuhei Matsumoto 	case SPDK_NVME_OPC_WRITE:
4983ddb680ebSShuhei Matsumoto 	case SPDK_NVME_OPC_COMPARE:
4984b09de013SShuhei Matsumoto 		return nvmf_bdev_ctrlr_get_dif_ctx(desc, cmd, dif_ctx);
4985ddb680ebSShuhei Matsumoto 	default:
4986ddb680ebSShuhei Matsumoto 		break;
4987ddb680ebSShuhei Matsumoto 	}
4988ddb680ebSShuhei Matsumoto 
4989ddb680ebSShuhei Matsumoto 	return false;
4990ddb680ebSShuhei Matsumoto }
4991ddb680ebSShuhei Matsumoto 
4992ddb680ebSShuhei Matsumoto bool
4993ddb680ebSShuhei Matsumoto spdk_nvmf_request_get_dif_ctx(struct spdk_nvmf_request *req, struct spdk_dif_ctx *dif_ctx)
4994ddb680ebSShuhei Matsumoto {
4995ddb680ebSShuhei Matsumoto 	struct spdk_nvmf_qpair *qpair = req->qpair;
49964ff3665cSShuhei Matsumoto 	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
49974ff3665cSShuhei Matsumoto 
49984ff3665cSShuhei Matsumoto 	if (spdk_likely(ctrlr == NULL || !ctrlr->dif_insert_or_strip)) {
49994ff3665cSShuhei Matsumoto 		return false;
50004ff3665cSShuhei Matsumoto 	}
5001ddb680ebSShuhei Matsumoto 
50023caf2080SKonrad Sztyber 	if (spdk_unlikely(!spdk_nvmf_qpair_is_active(qpair))) {
5003ddb680ebSShuhei Matsumoto 		return false;
5004ddb680ebSShuhei Matsumoto 	}
5005ddb680ebSShuhei Matsumoto 
5006ddb680ebSShuhei Matsumoto 	if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC)) {
5007ddb680ebSShuhei Matsumoto 		return false;
5008ddb680ebSShuhei Matsumoto 	}
5009ddb680ebSShuhei Matsumoto 
50109cb21ad6SSeth Howell 	if (spdk_unlikely(nvmf_qpair_is_admin_queue(qpair))) {
5011ddb680ebSShuhei Matsumoto 		return false;
5012ddb680ebSShuhei Matsumoto 	}
5013ddb680ebSShuhei Matsumoto 
5014198fd2ceSSeth Howell 	return nvmf_ctrlr_get_dif_ctx(ctrlr, &req->cmd->nvme_cmd, dif_ctx);
5015ddb680ebSShuhei Matsumoto }
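
/*
 * Illustrative sketch (hypothetical transport-side code): a transport that
 * performs DIF insert/strip can query the per-command DIF context before
 * moving any data:
 *
 *	struct spdk_dif_ctx dif_ctx;
 *
 *	if (spdk_nvmf_request_get_dif_ctx(req, &dif_ctx)) {
 *		... generate or verify protection information using dif_ctx ...
 *	}
 */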
50163fa22056SMichael Haeuptle 
50173fa22056SMichael Haeuptle void
50183fa22056SMichael Haeuptle spdk_nvmf_set_custom_admin_cmd_hdlr(uint8_t opc, spdk_nvmf_custom_cmd_hdlr hdlr)
50193fa22056SMichael Haeuptle {
50203fa22056SMichael Haeuptle 	g_nvmf_custom_admin_cmd_hdlrs[opc].hdlr = hdlr;
50213fa22056SMichael Haeuptle }
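
/*
 * Illustrative sketch: an application may register a handler that is
 * consulted before the built-in handling of that admin opcode. The handler
 * name below is hypothetical; it is expected to fill in the response and
 * return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE (or _ASYNCHRONOUS) once it
 * takes ownership of the command:
 *
 *	static int
 *	my_identify_hdlr(struct spdk_nvmf_request *req)
 *	{
 *		struct spdk_nvme_cpl *rsp = spdk_nvmf_request_get_response(req);
 *
 *		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
 *		rsp->status.sc = SPDK_NVME_SC_SUCCESS;
 *		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
 *	}
 *
 *	spdk_nvmf_set_custom_admin_cmd_hdlr(SPDK_NVME_OPC_IDENTIFY, my_identify_hdlr);
 */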
50223fa22056SMichael Haeuptle 
50233fa22056SMichael Haeuptle static int
50249910e8d7SJim Harris nvmf_passthru_admin_cmd_for_bdev_nsid(struct spdk_nvmf_request *req, uint32_t bdev_nsid)
50253fa22056SMichael Haeuptle {
50263fa22056SMichael Haeuptle 	struct spdk_bdev *bdev;
50273fa22056SMichael Haeuptle 	struct spdk_bdev_desc *desc;
50283fa22056SMichael Haeuptle 	struct spdk_io_channel *ch;
5029c156f3e1SKarl Bonde Torp 	struct spdk_nvmf_ns *ns;
5030c156f3e1SKarl Bonde Torp 	struct spdk_nvmf_ctrlr *ctrlr;
50313fa22056SMichael Haeuptle 	struct spdk_nvme_cpl *response = spdk_nvmf_request_get_response(req);
50323fa22056SMichael Haeuptle 	int rc;
50333fa22056SMichael Haeuptle 
50343fa22056SMichael Haeuptle 	rc = spdk_nvmf_request_get_bdev(bdev_nsid, req, &bdev, &desc, &ch);
50353fa22056SMichael Haeuptle 	if (rc) {
50363fa22056SMichael Haeuptle 		response->status.sct = SPDK_NVME_SCT_GENERIC;
50373fa22056SMichael Haeuptle 		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
50383fa22056SMichael Haeuptle 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
50393fa22056SMichael Haeuptle 	}
5040c156f3e1SKarl Bonde Torp 
5041c156f3e1SKarl Bonde Torp 	ctrlr = req->qpair->ctrlr;
5042c156f3e1SKarl Bonde Torp 	ns = nvmf_ctrlr_get_ns(ctrlr, bdev_nsid);
5043c156f3e1SKarl Bonde Torp 
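	/* spdk_nvmf_request_get_bdev() above already validated bdev_nsid, so this
	 * lookup cannot return NULL.
	 */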
5044f220d590SKonrad Sztyber 	if (ns->passthru_nsid) {
5045f220d590SKonrad Sztyber 		req->cmd->nvme_cmd.nsid = ns->passthru_nsid;
5046c156f3e1SKarl Bonde Torp 	}
5047c156f3e1SKarl Bonde Torp 
50483fa22056SMichael Haeuptle 	return spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev, desc, ch, req, NULL);
50493fa22056SMichael Haeuptle }
50503fa22056SMichael Haeuptle 
50519910e8d7SJim Harris static int
50529910e8d7SJim Harris nvmf_passthru_admin_cmd(struct spdk_nvmf_request *req)
50539910e8d7SJim Harris {
50549910e8d7SJim Harris 	struct spdk_nvme_cmd *cmd = spdk_nvmf_request_get_cmd(req);
50559910e8d7SJim Harris 	uint32_t bdev_nsid;
50569910e8d7SJim Harris 
50579910e8d7SJim Harris 	if (g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].nsid != 0) {
50589910e8d7SJim Harris 		bdev_nsid = g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].nsid;
50599910e8d7SJim Harris 	} else {
50609910e8d7SJim Harris 		bdev_nsid = cmd->nsid;
50619910e8d7SJim Harris 	}
50629910e8d7SJim Harris 
50639910e8d7SJim Harris 	return nvmf_passthru_admin_cmd_for_bdev_nsid(req, bdev_nsid);
50649910e8d7SJim Harris }
50659910e8d7SJim Harris 
50669910e8d7SJim Harris int
50679910e8d7SJim Harris nvmf_passthru_admin_cmd_for_ctrlr(struct spdk_nvmf_request *req, struct spdk_nvmf_ctrlr *ctrlr)
50689910e8d7SJim Harris {
50699910e8d7SJim Harris 	struct spdk_nvme_cpl *response = spdk_nvmf_request_get_response(req);
50709910e8d7SJim Harris 	struct spdk_nvmf_ns *ns;
50719910e8d7SJim Harris 
50729910e8d7SJim Harris 	ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys);
50739910e8d7SJim Harris 	if (ns == NULL) {
50749910e8d7SJim Harris 		/* Is there a better sc to use here? */
50759910e8d7SJim Harris 		response->status.sct = SPDK_NVME_SCT_GENERIC;
50769910e8d7SJim Harris 		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
50779910e8d7SJim Harris 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
50789910e8d7SJim Harris 	}
50799910e8d7SJim Harris 
50809910e8d7SJim Harris 	return nvmf_passthru_admin_cmd_for_bdev_nsid(req, ns->nsid);
50819910e8d7SJim Harris }
50829910e8d7SJim Harris 
50833fa22056SMichael Haeuptle void
50843fa22056SMichael Haeuptle spdk_nvmf_set_passthru_admin_cmd(uint8_t opc, uint32_t forward_nsid)
50853fa22056SMichael Haeuptle {
50863fa22056SMichael Haeuptle 	g_nvmf_custom_admin_cmd_hdlrs[opc].hdlr = nvmf_passthru_admin_cmd;
50873fa22056SMichael Haeuptle 	g_nvmf_custom_admin_cmd_hdlrs[opc].nsid = forward_nsid;
50883fa22056SMichael Haeuptle }
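
/*
 * Illustrative sketch: route an admin opcode straight to the bdev layer. The
 * opcode below (0xc1) is a hypothetical vendor-specific command; a
 * forward_nsid of 1 pins the passthrough to namespace 1, while 0 would use
 * the NSID carried in each command:
 *
 *	spdk_nvmf_set_passthru_admin_cmd(0xc1, 1);
 */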
50893fa22056SMichael Haeuptle 
50903fa22056SMichael Haeuptle int
50913fa22056SMichael Haeuptle spdk_nvmf_request_get_bdev(uint32_t nsid, struct spdk_nvmf_request *req,
50923fa22056SMichael Haeuptle 			   struct spdk_bdev **bdev, struct spdk_bdev_desc **desc, struct spdk_io_channel **ch)
50933fa22056SMichael Haeuptle {
50943fa22056SMichael Haeuptle 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
50953fa22056SMichael Haeuptle 	struct spdk_nvmf_ns *ns;
50963fa22056SMichael Haeuptle 	struct spdk_nvmf_poll_group *group = req->qpair->group;
50973fa22056SMichael Haeuptle 	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
50983fa22056SMichael Haeuptle 
50993fa22056SMichael Haeuptle 	*bdev = NULL;
51003fa22056SMichael Haeuptle 	*desc = NULL;
51013fa22056SMichael Haeuptle 	*ch = NULL;
51023fa22056SMichael Haeuptle 
510305859ef1SJim Harris 	ns = nvmf_ctrlr_get_ns(ctrlr, nsid);
51043fa22056SMichael Haeuptle 	if (ns == NULL || ns->bdev == NULL) {
51053fa22056SMichael Haeuptle 		return -EINVAL;
51063fa22056SMichael Haeuptle 	}
51073fa22056SMichael Haeuptle 
51083fa22056SMichael Haeuptle 	assert(group != NULL && group->sgroups != NULL);
51093fa22056SMichael Haeuptle 	ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[nsid - 1];
51103fa22056SMichael Haeuptle 	*bdev = ns->bdev;
51113fa22056SMichael Haeuptle 	*desc = ns->desc;
51123fa22056SMichael Haeuptle 	*ch = ns_info->channel;
51133fa22056SMichael Haeuptle 
51143fa22056SMichael Haeuptle 	return 0;
51153fa22056SMichael Haeuptle }
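
/*
 * Illustrative sketch (hypothetical custom handler code): resolve the bdev
 * objects backing the command's namespace and forward the command, similar to
 * nvmf_passthru_admin_cmd_for_bdev_nsid() above:
 *
 *	struct spdk_bdev *bdev;
 *	struct spdk_bdev_desc *desc;
 *	struct spdk_io_channel *ch;
 *	struct spdk_nvme_cmd *cmd = spdk_nvmf_request_get_cmd(req);
 *
 *	if (spdk_nvmf_request_get_bdev(cmd->nsid, req, &bdev, &desc, &ch) != 0) {
 *		... set an error status in the response ...
 *		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
 *	}
 *	return spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev, desc, ch, req, NULL);
 */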
51163fa22056SMichael Haeuptle 
51173fa22056SMichael Haeuptle struct spdk_nvmf_ctrlr *spdk_nvmf_request_get_ctrlr(struct spdk_nvmf_request *req)
51183fa22056SMichael Haeuptle {
51193fa22056SMichael Haeuptle 	return req->qpair->ctrlr;
51203fa22056SMichael Haeuptle }
51213fa22056SMichael Haeuptle 
51223fa22056SMichael Haeuptle struct spdk_nvme_cmd *spdk_nvmf_request_get_cmd(struct spdk_nvmf_request *req)
51233fa22056SMichael Haeuptle {
51243fa22056SMichael Haeuptle 	return &req->cmd->nvme_cmd;
51253fa22056SMichael Haeuptle }
51263fa22056SMichael Haeuptle 
51273fa22056SMichael Haeuptle struct spdk_nvme_cpl *spdk_nvmf_request_get_response(struct spdk_nvmf_request *req)
51283fa22056SMichael Haeuptle {
51293fa22056SMichael Haeuptle 	return &req->rsp->nvme_cpl;
51303fa22056SMichael Haeuptle }
51313fa22056SMichael Haeuptle 
51323fa22056SMichael Haeuptle struct spdk_nvmf_subsystem *spdk_nvmf_request_get_subsystem(struct spdk_nvmf_request *req)
51333fa22056SMichael Haeuptle {
51343fa22056SMichael Haeuptle 	return req->qpair->ctrlr->subsys;
51353fa22056SMichael Haeuptle }
51363fa22056SMichael Haeuptle 
513770a82d9aSJohn Levon size_t
513870a82d9aSJohn Levon spdk_nvmf_request_copy_from_buf(struct spdk_nvmf_request *req,
513970a82d9aSJohn Levon 				void *buf, size_t buflen)
514070a82d9aSJohn Levon {
514170a82d9aSJohn Levon 	struct spdk_iov_xfer ix;
514270a82d9aSJohn Levon 
514370a82d9aSJohn Levon 	spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
514470a82d9aSJohn Levon 	return spdk_iov_xfer_from_buf(&ix, buf, buflen);
514570a82d9aSJohn Levon }
514670a82d9aSJohn Levon 
514770a82d9aSJohn Levon size_t
514870a82d9aSJohn Levon spdk_nvmf_request_copy_to_buf(struct spdk_nvmf_request *req,
514970a82d9aSJohn Levon 			      void *buf, size_t buflen)
515070a82d9aSJohn Levon {
515170a82d9aSJohn Levon 	struct spdk_iov_xfer ix;
515270a82d9aSJohn Levon 
515370a82d9aSJohn Levon 	spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
515470a82d9aSJohn Levon 	return spdk_iov_xfer_to_buf(&ix, buf, buflen);
515570a82d9aSJohn Levon }
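
/*
 * Illustrative sketch: these helpers move data between the request's
 * scatter-gather list and a flat buffer so a handler never has to walk
 * req->iov itself. For a controller-to-host transfer (e.g. a hypothetical
 * vendor-defined log page), fill the request from a local buffer:
 *
 *	uint8_t page[256];
 *
 *	... populate page ...
 *	spdk_nvmf_request_copy_from_buf(req, page, sizeof(page));
 *
 * spdk_nvmf_request_copy_to_buf() does the reverse for host-to-controller
 * payloads.
 */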
515670a82d9aSJohn Levon 
5157a00efdbfSJacek Kalwas struct spdk_nvmf_subsystem *spdk_nvmf_ctrlr_get_subsystem(struct spdk_nvmf_ctrlr *ctrlr)
5158a00efdbfSJacek Kalwas {
5159a00efdbfSJacek Kalwas 	return ctrlr->subsys;
5160a00efdbfSJacek Kalwas }
51613dff6a3bSJacek Kalwas 
51628dd1cd21SBen Walker uint16_t
51638dd1cd21SBen Walker spdk_nvmf_ctrlr_get_id(struct spdk_nvmf_ctrlr *ctrlr)
51643dff6a3bSJacek Kalwas {
51653dff6a3bSJacek Kalwas 	return ctrlr->cntlid;
51663dff6a3bSJacek Kalwas }
516762649a7dSMichael Haeuptle 
516862649a7dSMichael Haeuptle struct spdk_nvmf_request *spdk_nvmf_request_get_req_to_abort(struct spdk_nvmf_request *req)
516962649a7dSMichael Haeuptle {
517062649a7dSMichael Haeuptle 	return req->req_to_abort;
517162649a7dSMichael Haeuptle }
5172