xref: /spdk/lib/nvmf/nvmf_internal.h (revision 9889ab2dc80e40dae92dcef361d53dcba722043d)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __NVMF_INTERNAL_H__
#define __NVMF_INTERNAL_H__

#include "spdk/stdinc.h"

#include "spdk/likely.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_spec.h"
#include "spdk/assert.h"
#include "spdk/bdev.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/thread.h"

#define SPDK_NVMF_MAX_SGL_ENTRIES	16

/* The maximum number of buffers per request */
#define NVMF_REQ_MAX_BUFFERS	(SPDK_NVMF_MAX_SGL_ENTRIES * 2)

/* The AIO backend requires block-size-aligned data buffers;
 * an extra 4KiB-aligned data buffer should work for most devices.
 */
#define SHIFT_4KB			12u
#define NVMF_DATA_BUFFER_ALIGNMENT	(1u << SHIFT_4KB)
#define NVMF_DATA_BUFFER_MASK		(NVMF_DATA_BUFFER_ALIGNMENT - 1LL)
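
/*
 * Illustrative sketch (assumption, not part of this header): code that hands
 * buffers to the AIO backend can check alignment against the mask, and round
 * an arbitrary pointer up to the next 4KiB boundary, e.g.
 *
 *   assert(((uintptr_t)buf & NVMF_DATA_BUFFER_MASK) == 0);
 *   aligned = (void *)(((uintptr_t)buf + NVMF_DATA_BUFFER_MASK) &
 *                      ~(uintptr_t)NVMF_DATA_BUFFER_MASK);
 */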

enum spdk_nvmf_subsystem_state {
	SPDK_NVMF_SUBSYSTEM_INACTIVE = 0,
	SPDK_NVMF_SUBSYSTEM_ACTIVATING,
	SPDK_NVMF_SUBSYSTEM_ACTIVE,
	SPDK_NVMF_SUBSYSTEM_PAUSING,
	SPDK_NVMF_SUBSYSTEM_PAUSED,
	SPDK_NVMF_SUBSYSTEM_RESUMING,
	SPDK_NVMF_SUBSYSTEM_DEACTIVATING,
};

enum spdk_nvmf_qpair_state {
	SPDK_NVMF_QPAIR_UNINITIALIZED = 0,
	SPDK_NVMF_QPAIR_ACTIVE,
	SPDK_NVMF_QPAIR_DEACTIVATING,
	SPDK_NVMF_QPAIR_ERROR,
};

typedef void (*spdk_nvmf_state_change_done)(void *cb_arg, int status);

struct spdk_nvmf_tgt {
	char					name[NVMF_TGT_NAME_MAX_LENGTH];

	uint64_t				discovery_genctr;

	uint32_t				max_subsystems;

	/* Array of subsystem pointers of size max_subsystems indexed by sid */
	struct spdk_nvmf_subsystem		**subsystems;

	TAILQ_HEAD(, spdk_nvmf_transport)	transports;

	spdk_nvmf_tgt_destroy_done_fn		*destroy_cb_fn;
	void					*destroy_cb_arg;

	TAILQ_ENTRY(spdk_nvmf_tgt)		link;
};

struct spdk_nvmf_host {
	char				nqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	TAILQ_ENTRY(spdk_nvmf_host)	link;
};

struct spdk_nvmf_listener {
	struct spdk_nvme_transport_id	trid;
	struct spdk_nvmf_transport	*transport;
	TAILQ_ENTRY(spdk_nvmf_listener)	link;
};

struct spdk_nvmf_transport_pg_cache_buf {
	STAILQ_ENTRY(spdk_nvmf_transport_pg_cache_buf) link;
};

struct spdk_nvmf_transport_poll_group {
	struct spdk_nvmf_transport					*transport;
	/* Requests that are waiting to obtain a data buffer */
	STAILQ_HEAD(, spdk_nvmf_request)				pending_buf_queue;
	STAILQ_HEAD(, spdk_nvmf_transport_pg_cache_buf)			buf_cache;
	uint32_t							buf_cache_count;
	uint32_t							buf_cache_size;
	struct spdk_nvmf_poll_group					*group;
	TAILQ_ENTRY(spdk_nvmf_transport_poll_group)			link;
};

/* Maximum number of registrants supported per namespace */
#define SPDK_NVMF_MAX_NUM_REGISTRANTS		16

struct spdk_nvmf_registrant_info {
	uint64_t		rkey;
	char			host_uuid[SPDK_UUID_STRING_LEN];
};

struct spdk_nvmf_reservation_info {
	bool					ptpl_activated;
	enum spdk_nvme_reservation_type		rtype;
	uint64_t				crkey;
	char					bdev_uuid[SPDK_UUID_STRING_LEN];
	char					holder_uuid[SPDK_UUID_STRING_LEN];
	uint32_t				num_regs;
	struct spdk_nvmf_registrant_info	registrants[SPDK_NVMF_MAX_NUM_REGISTRANTS];
};

struct spdk_nvmf_subsystem_pg_ns_info {
	struct spdk_io_channel		*channel;
	struct spdk_uuid		uuid;
	/* current reservation key, no reservation if the value is 0 */
	uint64_t			crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type	rtype;
	/* Host ID which holds the reservation */
	struct spdk_uuid		holder_id;
	/* Host IDs of the registrants associated with the namespace */
	struct spdk_uuid		reg_hostid[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint64_t			num_blocks;
};

typedef void(*spdk_nvmf_poll_group_mod_done)(void *cb_arg, int status);

struct spdk_nvmf_subsystem_poll_group {
	/* Array of namespace information for each namespace indexed by nsid - 1 */
	struct spdk_nvmf_subsystem_pg_ns_info	*ns_info;
	uint32_t				num_ns;

	uint64_t				io_outstanding;
	spdk_nvmf_poll_group_mod_done		cb_fn;
	void					*cb_arg;

	enum spdk_nvmf_subsystem_state		state;

	TAILQ_HEAD(, spdk_nvmf_request)		queued;
};

struct spdk_nvmf_poll_group {
	struct spdk_thread				*thread;
	struct spdk_poller				*poller;

	TAILQ_HEAD(, spdk_nvmf_transport_poll_group)	tgroups;

	/* Array of poll groups indexed by subsystem id (sid) */
	struct spdk_nvmf_subsystem_poll_group		*sgroups;
	uint32_t					num_sgroups;

	/* All of the queue pairs that belong to this poll group */
	TAILQ_HEAD(, spdk_nvmf_qpair)			qpairs;

	/* Statistics */
	struct spdk_nvmf_poll_group_stat		stat;
};

typedef enum _spdk_nvmf_request_exec_status {
	SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE,
	SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS,
} spdk_nvmf_request_exec_status;
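
/*
 * Illustrative sketch (assumption, not a definition from this header): command
 * handlers return COMPLETE when the response in req->rsp is already filled in,
 * and ASYNCHRONOUS when completion will be signalled later via
 * spdk_nvmf_request_complete(), e.g.
 *
 *   static int handle_cmd(struct spdk_nvmf_request *req)
 *   {
 *           if (submit_backend_io(req) == 0) {
 *                   // backend completion callback calls spdk_nvmf_request_complete(req)
 *                   return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
 *           }
 *           req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
 *           return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
 *   }
 *
 * submit_backend_io() is a hypothetical helper used only for illustration.
 */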

/* Host-to-controller message: sized to match an NVMe submission queue entry (64 bytes). */
union nvmf_h2c_msg {
	struct spdk_nvmf_capsule_cmd			nvmf_cmd;
	struct spdk_nvme_cmd				nvme_cmd;
	struct spdk_nvmf_fabric_prop_set_cmd		prop_set_cmd;
	struct spdk_nvmf_fabric_prop_get_cmd		prop_get_cmd;
	struct spdk_nvmf_fabric_connect_cmd		connect_cmd;
};
SPDK_STATIC_ASSERT(sizeof(union nvmf_h2c_msg) == 64, "Incorrect size");

/* Controller-to-host message: sized to match an NVMe completion queue entry (16 bytes). */
union nvmf_c2h_msg {
	struct spdk_nvme_cpl				nvme_cpl;
	struct spdk_nvmf_fabric_prop_get_rsp		prop_get_rsp;
	struct spdk_nvmf_fabric_connect_rsp		connect_rsp;
};
SPDK_STATIC_ASSERT(sizeof(union nvmf_c2h_msg) == 16, "Incorrect size");

struct spdk_nvmf_dif_info {
	struct spdk_dif_ctx			dif_ctx;
	bool					dif_insert_or_strip;
	uint32_t				elba_length;
	uint32_t				orig_length;
};

struct spdk_nvmf_request {
	struct spdk_nvmf_qpair		*qpair;
	uint32_t			length;
	enum spdk_nvme_data_transfer	xfer;
	void				*data;
	union nvmf_h2c_msg		*cmd;
	union nvmf_c2h_msg		*rsp;
	void				*buffers[NVMF_REQ_MAX_BUFFERS];
	struct iovec			iov[NVMF_REQ_MAX_BUFFERS];
	uint32_t			iovcnt;
	bool				data_from_pool;
	struct spdk_bdev_io_wait_entry	bdev_io_wait;
	struct spdk_nvmf_dif_info	dif;

	STAILQ_ENTRY(spdk_nvmf_request)	buf_link;
	TAILQ_ENTRY(spdk_nvmf_request)	link;
};

struct spdk_nvmf_registrant {
	TAILQ_ENTRY(spdk_nvmf_registrant) link;
	struct spdk_uuid hostid;
	/* Registration key */
	uint64_t rkey;
};

struct spdk_nvmf_ns {
	uint32_t nsid;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_nvmf_ns_opts opts;
	/* reservation notification mask */
	uint32_t mask;
	/* generation code */
	uint32_t gen;
	/* registrants head */
	TAILQ_HEAD(, spdk_nvmf_registrant) registrants;
	/* current reservation key */
	uint64_t crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* current reservation holder, only valid if the reservation type allows only one holder */
	struct spdk_nvmf_registrant *holder;
	/* Persist Through Power Loss file which contains the persistent reservation */
	char *ptpl_file;
	/* Persist Through Power Loss feature is enabled */
	bool ptpl_activated;
};

struct spdk_nvmf_qpair {
	enum spdk_nvmf_qpair_state		state;
	spdk_nvmf_state_change_done		state_cb;
	void					*state_cb_arg;

	struct spdk_nvmf_transport		*transport;
	struct spdk_nvmf_ctrlr			*ctrlr;
	struct spdk_nvmf_poll_group		*group;

	uint16_t				qid;
	uint16_t				sq_head;
	uint16_t				sq_head_max;

	TAILQ_HEAD(, spdk_nvmf_request)		outstanding;
	TAILQ_ENTRY(spdk_nvmf_qpair)		link;
};

struct spdk_nvmf_ctrlr_feat {
	union spdk_nvme_feat_arbitration arbitration;
	union spdk_nvme_feat_power_management power_management;
	union spdk_nvme_feat_error_recovery error_recovery;
	union spdk_nvme_feat_volatile_write_cache volatile_write_cache;
	union spdk_nvme_feat_number_of_queues number_of_queues;
	union spdk_nvme_feat_write_atomicity write_atomicity;
	union spdk_nvme_feat_async_event_configuration async_event_configuration;
	union spdk_nvme_feat_keep_alive_timer keep_alive_timer;
};

/*
 * NVMf reservation notification log page.
 */
struct spdk_nvmf_reservation_log {
	struct spdk_nvme_reservation_notification_log	log;
	TAILQ_ENTRY(spdk_nvmf_reservation_log)		link;
	struct spdk_nvmf_ctrlr				*ctrlr;
};

/*
 * This structure represents an NVMe-oF controller,
 * which is like a "session" in networking terms.
 */
struct spdk_nvmf_ctrlr {
	uint16_t			cntlid;
	char				hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	struct spdk_nvmf_subsystem	*subsys;

	struct {
		union spdk_nvme_cap_register	cap;
		union spdk_nvme_vs_register	vs;
		union spdk_nvme_cc_register	cc;
		union spdk_nvme_csts_register	csts;
	} vcprop; /* virtual controller properties */

	struct spdk_nvmf_ctrlr_feat feat;

	struct spdk_nvmf_qpair	*admin_qpair;
	struct spdk_thread	*thread;
	struct spdk_bit_array	*qpair_mask;

	struct spdk_nvmf_request *aer_req;
	union spdk_nvme_async_event_completion notice_event;
	union spdk_nvme_async_event_completion reservation_event;
	struct spdk_uuid hostid;

	uint16_t changed_ns_list_count;
	struct spdk_nvme_ns_list changed_ns_list;
	uint64_t log_page_count;
	uint8_t num_avail_log_pages;
	TAILQ_HEAD(log_page_head, spdk_nvmf_reservation_log) log_head;

	/* Time to trigger keep-alive: poller_time = now_tick + period */
	uint64_t			last_keep_alive_tick;
	struct spdk_poller		*keep_alive_poller;

	bool				dif_insert_or_strip;

	TAILQ_ENTRY(spdk_nvmf_ctrlr)	link;
};

struct spdk_nvmf_subsystem {
	struct spdk_thread		*thread;
	uint32_t			id;
	enum spdk_nvmf_subsystem_state	state;

	char subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	enum spdk_nvmf_subtype subtype;
	uint16_t next_cntlid;
	bool allow_any_host;
	bool allow_any_listener;

	struct spdk_nvmf_tgt			*tgt;

	char sn[SPDK_NVME_CTRLR_SN_LEN + 1];
	char mn[SPDK_NVME_CTRLR_MN_LEN + 1];

	/* Array of pointers to namespaces of size max_nsid indexed by nsid - 1 */
	struct spdk_nvmf_ns			**ns;
	uint32_t				max_nsid;
	/* The maximum nsid allowed for this subsystem */
	uint32_t				max_allowed_nsid;

	TAILQ_HEAD(, spdk_nvmf_ctrlr)		ctrlrs;

	TAILQ_HEAD(, spdk_nvmf_host)		hosts;

	TAILQ_HEAD(, spdk_nvmf_listener)	listeners;

	TAILQ_ENTRY(spdk_nvmf_subsystem)	entries;
};

struct spdk_nvmf_transport *spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt,
		enum spdk_nvme_transport_type);

int spdk_nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
				       struct spdk_nvmf_transport *transport);
int spdk_nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem);
int spdk_nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
				       struct spdk_nvmf_subsystem *subsystem,
				       spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void spdk_nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void spdk_nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void spdk_nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void spdk_nvmf_request_exec(struct spdk_nvmf_request *req);
int spdk_nvmf_request_free(struct spdk_nvmf_request *req);
int spdk_nvmf_request_complete(struct spdk_nvmf_request *req);

void spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
				    struct spdk_nvmf_transport_poll_group *group,
				    struct spdk_nvmf_transport *transport);
int spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
				  struct spdk_nvmf_transport_poll_group *group,
				  struct spdk_nvmf_transport *transport,
				  uint32_t length);
int spdk_nvmf_request_get_buffers_multi(struct spdk_nvmf_request *req,
					struct spdk_nvmf_transport_poll_group *group,
					struct spdk_nvmf_transport *transport,
					uint32_t *lengths, uint32_t num_lengths);

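/*
 * Illustrative sketch (assumption, not a definition from this header): a
 * transport typically requests data buffers from its poll group and parks the
 * request on the group's pending_buf_queue when the pool is exhausted, e.g.
 *
 *   if (spdk_nvmf_request_get_buffers(req, group, transport, req->length) != 0) {
 *           // no buffers available right now; leave the request on
 *           // group->pending_buf_queue and retry from the poller
 *           return;
 *   }
 *   // on success, req->iov/req->iovcnt describe the buffers and
 *   // req->data_from_pool is set
 */
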
bool spdk_nvmf_request_get_dif_ctx(struct spdk_nvmf_request *req, struct spdk_dif_ctx *dif_ctx);

void spdk_nvmf_get_discovery_log_page(struct spdk_nvmf_tgt *tgt, const char *hostnqn,
				      struct iovec *iov,
				      uint32_t iovcnt, uint64_t offset, uint32_t length);

void spdk_nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr);
int spdk_nvmf_ctrlr_process_fabrics_cmd(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req);
bool spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool spdk_nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid);

void spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
				      bool dif_insert_or_strip);
int spdk_nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				  struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int spdk_nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				   struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int spdk_nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int spdk_nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				   struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int spdk_nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int spdk_nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
bool spdk_nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
				      struct spdk_dif_ctx *dif_ctx);

int spdk_nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
				  struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
				      struct spdk_nvmf_ctrlr *ctrlr);
struct spdk_nvmf_ctrlr *spdk_nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		uint16_t cntlid);
int spdk_nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_ns_reservation_request(void *ctx);
void spdk_nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
		struct spdk_nvmf_ns *ns,
		enum spdk_nvme_reservation_notification_log_page_type type);

/*
 * Abort AER is sent on a per-controller basis and sends a completion for the AER to the host.
 * This function should be called when attempting to recover in error paths when it is OK for
 * the host to send a subsequent AER.
 */
void spdk_nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr);

/*
 * Free AER simply frees the RDMA resources for the AER without informing the host.
 * This function should be called when deleting a qpair when one wants to make sure
 * the qpair is completely empty before freeing the request. The reason we free the
 * AER without sending a completion is to prevent the host from sending another AER.
 */
void spdk_nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair);

static inline struct spdk_nvmf_ns *
_spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
	if (spdk_unlikely(nsid - 1 >= subsystem->max_nsid)) {
		return NULL;
	}

	return subsystem->ns[nsid - 1];
}
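
/*
 * Illustrative usage sketch (assumption, not a definition from this header):
 * callers resolve the NSID carried in the command and must handle both an
 * out-of-range NSID and an inactive namespace, e.g.
 *
 *   struct spdk_nvmf_ns *ns;
 *
 *   ns = _spdk_nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
 *   if (ns == NULL || ns->bdev == NULL) {
 *           rsp->status.sct = SPDK_NVME_SCT_GENERIC;
 *           rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
 *           return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
 *   }
 */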

static inline bool
spdk_nvmf_qpair_is_admin_queue(struct spdk_nvmf_qpair *qpair)
{
	return qpair->qid == 0;
}

#endif /* __NVMF_INTERNAL_H__ */