xref: /spdk/lib/nvmf/nvmf_internal.h (revision 510f4c134a21b45ff3a5add9ebc6c6cf7e49aeab)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef __NVMF_INTERNAL_H__
#define __NVMF_INTERNAL_H__

#include "spdk/stdinc.h"

#include "spdk/likely.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/nvmf_transport.h"
#include "spdk/nvmf_spec.h"
#include "spdk/assert.h"
#include "spdk/bdev.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/thread.h"

#define NVMF_MAX_ASYNC_EVENTS	(4)

/* The spec reserves cntlid values in the range FFF0h to FFFFh. */
#define NVMF_MIN_CNTLID 1
#define NVMF_MAX_CNTLID 0xFFEF

enum spdk_nvmf_subsystem_state {
	SPDK_NVMF_SUBSYSTEM_INACTIVE = 0,
	SPDK_NVMF_SUBSYSTEM_ACTIVATING,
	SPDK_NVMF_SUBSYSTEM_ACTIVE,
	SPDK_NVMF_SUBSYSTEM_PAUSING,
	SPDK_NVMF_SUBSYSTEM_PAUSED,
	SPDK_NVMF_SUBSYSTEM_RESUMING,
	SPDK_NVMF_SUBSYSTEM_DEACTIVATING,
	SPDK_NVMF_SUBSYSTEM_NUM_STATES,
};
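
/*
 * Typical transitions, as implied by the state names (a hedged summary, not a
 * formal state machine): INACTIVE -> ACTIVATING -> ACTIVE on start;
 * ACTIVE -> PAUSING -> PAUSED -> RESUMING -> ACTIVE for runtime changes;
 * and DEACTIVATING on the way back to INACTIVE when the subsystem stops.
 */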

struct spdk_nvmf_tgt {
	char					name[NVMF_TGT_NAME_MAX_LENGTH];

	pthread_mutex_t				mutex;

	uint64_t				discovery_genctr;

	uint32_t				max_subsystems;

	enum spdk_nvmf_tgt_discovery_filter	discovery_filter;

	/* Array of subsystem pointers of size max_subsystems indexed by sid */
	struct spdk_nvmf_subsystem		**subsystems;

	TAILQ_HEAD(, spdk_nvmf_transport)	transports;
	TAILQ_HEAD(, spdk_nvmf_poll_group)	poll_groups;

	/* Used for round-robin assignment of connections to poll groups */
	struct spdk_nvmf_poll_group		*next_poll_group;

	spdk_nvmf_tgt_destroy_done_fn		*destroy_cb_fn;
	void					*destroy_cb_arg;

	uint16_t				crdt[3];

	TAILQ_ENTRY(spdk_nvmf_tgt)		link;
};

struct spdk_nvmf_host {
	char				nqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	TAILQ_ENTRY(spdk_nvmf_host)	link;
};

struct spdk_nvmf_subsystem_listener {
	struct spdk_nvmf_subsystem			*subsystem;
	spdk_nvmf_tgt_subsystem_listen_done_fn		cb_fn;
	void						*cb_arg;
	struct spdk_nvme_transport_id			*trid;
	struct spdk_nvmf_transport			*transport;
	enum spdk_nvme_ana_state			*ana_state;
	uint64_t					ana_state_change_count;
	uint16_t					id;
	TAILQ_ENTRY(spdk_nvmf_subsystem_listener)	link;
};

/* Maximum number of registrants supported per namespace */
#define SPDK_NVMF_MAX_NUM_REGISTRANTS		16

struct spdk_nvmf_registrant_info {
	uint64_t		rkey;
	char			host_uuid[SPDK_UUID_STRING_LEN];
};

struct spdk_nvmf_reservation_info {
	bool					ptpl_activated;
	enum spdk_nvme_reservation_type		rtype;
	uint64_t				crkey;
	char					bdev_uuid[SPDK_UUID_STRING_LEN];
	char					holder_uuid[SPDK_UUID_STRING_LEN];
	uint32_t				num_regs;
	struct spdk_nvmf_registrant_info	registrants[SPDK_NVMF_MAX_NUM_REGISTRANTS];
};

struct spdk_nvmf_subsystem_pg_ns_info {
	struct spdk_io_channel		*channel;
	struct spdk_uuid		uuid;
	/* Current reservation key; no reservation if the value is 0 */
	uint64_t			crkey;
	/* Reservation type */
	enum spdk_nvme_reservation_type	rtype;
	/* Host ID that holds the reservation */
	struct spdk_uuid		holder_id;
	/* Host IDs of the registrants for this namespace */
	struct spdk_uuid		reg_hostid[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint64_t			num_blocks;

	/* I/O outstanding to this namespace */
	uint64_t			io_outstanding;
	enum spdk_nvmf_subsystem_state	state;
};

typedef void(*spdk_nvmf_poll_group_mod_done)(void *cb_arg, int status);

struct spdk_nvmf_subsystem_poll_group {
	/* Array of namespace information for each namespace indexed by nsid - 1 */
	struct spdk_nvmf_subsystem_pg_ns_info	*ns_info;
	uint32_t				num_ns;

	/* Number of ADMIN and FABRICS requests outstanding */
	uint64_t				mgmt_io_outstanding;
	spdk_nvmf_poll_group_mod_done		cb_fn;
	void					*cb_arg;

	enum spdk_nvmf_subsystem_state		state;

	TAILQ_HEAD(, spdk_nvmf_request)		queued;
};

struct spdk_nvmf_registrant {
	TAILQ_ENTRY(spdk_nvmf_registrant) link;
	struct spdk_uuid hostid;
	/* Registration key */
	uint64_t rkey;
};

struct spdk_nvmf_ns {
	uint32_t nsid;
	uint32_t anagrpid;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_nvmf_ns_opts opts;
	/* reservation notification mask */
	uint32_t mask;
	/* generation code */
	uint32_t gen;
	/* registrants head */
	TAILQ_HEAD(, spdk_nvmf_registrant) registrants;
	/* current reservation key */
	uint64_t crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* current reservation holder; only valid when the reservation type allows a single holder */
	struct spdk_nvmf_registrant *holder;
	/* Persist Through Power Loss file which contains the persistent reservation */
	char *ptpl_file;
	/* Persist Through Power Loss feature is enabled */
	bool ptpl_activated;
	/* ZCOPY supported on bdev device */
	bool zcopy;
};

struct spdk_nvmf_ctrlr_feat {
	union spdk_nvme_feat_arbitration arbitration;
	union spdk_nvme_feat_power_management power_management;
	union spdk_nvme_feat_error_recovery error_recovery;
	union spdk_nvme_feat_volatile_write_cache volatile_write_cache;
	union spdk_nvme_feat_number_of_queues number_of_queues;
	union spdk_nvme_feat_interrupt_coalescing interrupt_coalescing;
	union spdk_nvme_feat_interrupt_vector_configuration interrupt_vector_configuration;
	union spdk_nvme_feat_write_atomicity write_atomicity;
	union spdk_nvme_feat_async_event_configuration async_event_configuration;
	union spdk_nvme_feat_keep_alive_timer keep_alive_timer;
};

/*
 * NVMf reservation notification log page.
 */
struct spdk_nvmf_reservation_log {
	struct spdk_nvme_reservation_notification_log	log;
	TAILQ_ENTRY(spdk_nvmf_reservation_log)		link;
	struct spdk_nvmf_ctrlr				*ctrlr;
};

/*
 * NVMf async event completion.
 */
struct spdk_nvmf_async_event_completion {
	union spdk_nvme_async_event_completion		event;
	STAILQ_ENTRY(spdk_nvmf_async_event_completion)	link;
};

/*
 * This structure represents an NVMe-oF controller,
 * which is like a "session" in networking terms.
 */
struct spdk_nvmf_ctrlr {
	uint16_t			cntlid;
	char				hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	struct spdk_nvmf_subsystem	*subsys;

	struct spdk_nvmf_ctrlr_data	cdata;

	struct spdk_nvmf_registers	vcprop;

	struct spdk_nvmf_ctrlr_feat feat;

	struct spdk_nvmf_qpair	*admin_qpair;
	struct spdk_thread	*thread;
	struct spdk_bit_array	*qpair_mask;

	const struct spdk_nvmf_subsystem_listener	*listener;

	struct spdk_nvmf_request *aer_req[NVMF_MAX_ASYNC_EVENTS];
	STAILQ_HEAD(, spdk_nvmf_async_event_completion) async_events;
	uint64_t notice_aen_mask;
	uint8_t nr_aer_reqs;
	struct spdk_uuid hostid;

	uint32_t association_timeout; /* in milliseconds */
	uint16_t changed_ns_list_count;
	struct spdk_nvme_ns_list changed_ns_list;
	uint64_t log_page_count;
	uint8_t num_avail_log_pages;
	TAILQ_HEAD(log_page_head, spdk_nvmf_reservation_log) log_head;

	/* Time at which to trigger keep-alive: poller_time = now_tick + period */
	uint64_t			last_keep_alive_tick;
	struct spdk_poller		*keep_alive_poller;

	struct spdk_poller		*association_timer;

	struct spdk_poller		*cc_timer;
	uint64_t			cc_timeout_tsc;
	struct spdk_poller		*cc_timeout_timer;

	bool				dif_insert_or_strip;
	bool				in_destruct;
	bool				disconnect_in_progress;
	/* valid only when disconnect_in_progress is true */
	bool				disconnect_is_shn;
	bool				acre_enabled;
	bool				dynamic_ctrlr;

	TAILQ_ENTRY(spdk_nvmf_ctrlr)	link;
};

/* Maximum pending AERs that can be migrated */
#define NVMF_MIGR_MAX_PENDING_AERS 256

/* spdk_nvmf_ctrlr private migration data structure used to save/restore a controller */
struct nvmf_ctrlr_migr_data {
	uint32_t				opts_size;

	uint16_t				cntlid;
	uint8_t					reserved1[2];

	struct spdk_nvmf_ctrlr_feat		feat;
	uint32_t				reserved2[2];

	uint32_t				num_async_events;
	uint32_t				acre_enabled;
	uint64_t				notice_aen_mask;
	union spdk_nvme_async_event_completion	async_events[NVMF_MIGR_MAX_PENDING_AERS];

	/* Add new fields before reserved3, shrinking it so the total size stays fixed */
	uint8_t					reserved3[3000];
};
SPDK_STATIC_ASSERT(sizeof(struct nvmf_ctrlr_migr_data) == 0x1000, "Incorrect size");

#define NVMF_MAX_LISTENERS_PER_SUBSYSTEM	16

struct spdk_nvmf_subsystem {
	struct spdk_thread				*thread;

	uint32_t					id;

	enum spdk_nvmf_subsystem_state			state;
	enum spdk_nvmf_subtype				subtype;

	uint16_t					next_cntlid;
	struct {
		uint8_t					allow_any_host : 1;
		uint8_t					allow_any_listener : 1;
		uint8_t					ana_reporting : 1;
		uint8_t					reserved : 5;
	} flags;

	/* boolean for state change synchronization */
	bool						changing_state;

	bool						destroying;
	bool						async_destroy;

	struct spdk_nvmf_tgt				*tgt;

	/* Array of pointers to namespaces of size max_nsid indexed by nsid - 1 */
	struct spdk_nvmf_ns				**ns;
	uint32_t					max_nsid;

	uint16_t					min_cntlid;
	uint16_t					max_cntlid;

	TAILQ_HEAD(, spdk_nvmf_ctrlr)			ctrlrs;

	/* A mutex used to protect the hosts list and allow_any_host flag. Unlike the namespace
	 * array, this list is not used on the I/O path (it's needed for handling things like
	 * the CONNECT command), so use a mutex to protect it instead of requiring the subsystem
	 * state to be paused. This removes the requirement to pause the subsystem when hosts
	 * are added or removed dynamically. */
	pthread_mutex_t					mutex;
	TAILQ_HEAD(, spdk_nvmf_host)			hosts;
	TAILQ_HEAD(, spdk_nvmf_subsystem_listener)	listeners;
	struct spdk_bit_array				*used_listener_ids;

	TAILQ_ENTRY(spdk_nvmf_subsystem)		entries;

	nvmf_subsystem_destroy_cb			async_destroy_cb;
	void						*async_destroy_cb_arg;

	char						sn[SPDK_NVME_CTRLR_SN_LEN + 1];
	char						mn[SPDK_NVME_CTRLR_MN_LEN + 1];
	char						subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];

	/* Array of per-ANA-group namespace counts, of size max_nsid, indexed by anagrpid - 1.
	 * Sizing this array the same as the namespace array is sufficient for ANA groups.
	 */
	uint32_t					*ana_group;
};

int nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem);
int nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
				  struct spdk_nvmf_subsystem *subsystem,
				  spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem,
				     uint32_t nsid,
				     spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);

void nvmf_update_discovery_log(struct spdk_nvmf_tgt *tgt, const char *hostnqn);
void nvmf_get_discovery_log_page(struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
				 uint32_t iovcnt, uint64_t offset, uint32_t length,
				 struct spdk_nvme_transport_id *cmd_source_trid);

void nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req);
int nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req);
bool nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid);
bool nvmf_ctrlr_use_zcopy(struct spdk_nvmf_request *req);

void nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
				 bool dif_insert_or_strip);
int nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req);
int nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
bool nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
				 struct spdk_dif_ctx *dif_ctx);
bool nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev);

int nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
			     struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
				 struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem,
		bool stop);
struct spdk_nvmf_ctrlr *nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		uint16_t cntlid);
struct spdk_nvmf_subsystem_listener *nvmf_subsystem_find_listener(
	struct spdk_nvmf_subsystem *subsystem,
	const struct spdk_nvme_transport_id *trid);
struct spdk_nvmf_listener *nvmf_transport_find_listener(
	struct spdk_nvmf_transport *transport,
	const struct spdk_nvme_transport_id *trid);
void nvmf_transport_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w,
			      bool named);
void nvmf_transport_listen_dump_opts(struct spdk_nvmf_transport *transport,
				     const struct spdk_nvme_transport_id *trid, struct spdk_json_write_ctx *w);
void nvmf_subsystem_set_ana_state(struct spdk_nvmf_subsystem *subsystem,
				  const struct spdk_nvme_transport_id *trid,
				  enum spdk_nvme_ana_state ana_state, uint32_t anagrpid,
				  spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, void *cb_arg);
bool nvmf_subsystem_get_ana_reporting(struct spdk_nvmf_subsystem *subsystem);

/**
 * Sets the controller ID range for a subsystem.
 * Valid range is [1, 0xFFEF].
 *
 * May only be performed on subsystems in the INACTIVE state.
 *
 * \param subsystem Subsystem to modify.
 * \param min_cntlid Minimum controller ID.
 * \param max_cntlid Maximum controller ID.
 *
 * \return 0 on success, or negated errno value on failure.
 */
int nvmf_subsystem_set_cntlid_range(struct spdk_nvmf_subsystem *subsystem,
				    uint16_t min_cntlid, uint16_t max_cntlid);
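
/*
 * Illustrative usage (a sketch, not taken from this codebase; the subsystem
 * pointer and error handling here are hypothetical):
 *
 *	if (nvmf_subsystem_set_cntlid_range(subsystem, 1, 0x00FF) != 0) {
 *		SPDK_ERRLOG("failed to set cntlid range\n");
 *	}
 *
 * Because the range may only change while the subsystem is INACTIVE, this
 * must happen before the subsystem is started.
 */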

int nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_async_event_discovery_log_change_notice(void *ctx);
void nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_async_event_error_event(struct spdk_nvmf_ctrlr *ctrlr,
				       union spdk_nvme_async_event_completion event);
void nvmf_ns_reservation_request(void *ctx);
void nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
				       struct spdk_nvmf_ns *ns,
				       enum spdk_nvme_reservation_notification_log_page_type type);

/*
 * Aborting an AER is done on a per-controller basis and sends a completion for the AER to the
 * host. Call this function when recovering in error paths where it is OK for the host to send
 * a subsequent AER.
 */
void nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_save_aers(struct spdk_nvmf_ctrlr *ctrlr, uint16_t *aer_cids,
			 uint16_t max_aers);

int nvmf_ctrlr_save_migr_data(struct spdk_nvmf_ctrlr *ctrlr, struct nvmf_ctrlr_migr_data *data);
int nvmf_ctrlr_restore_migr_data(struct spdk_nvmf_ctrlr *ctrlr, struct nvmf_ctrlr_migr_data *data);
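
/*
 * A hedged sketch of how the save/restore pair above can be used (for example
 * by a transport implementing live migration); the ctrlr pointers and the
 * transfer step are hypothetical:
 *
 *	struct nvmf_ctrlr_migr_data data = {};
 *
 *	if (nvmf_ctrlr_save_migr_data(src_ctrlr, &data) == 0) {
 *		... transfer the fixed-size 4 KiB blob to the destination ...
 *		nvmf_ctrlr_restore_migr_data(dst_ctrlr, &data);
 *	}
 */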

/*
 * Abort zero-copy requests that already got the buffer (received zcopy_start cb), but haven't
 * started zcopy_end.  These requests are kept on the outstanding queue, but are not waiting for a
 * completion from the bdev layer, so, when a qpair is being disconnected, we need to kick them to
 * force their completion.
 */
void nvmf_qpair_abort_pending_zcopy_reqs(struct spdk_nvmf_qpair *qpair);

/*
 * Free AER simply frees the RDMA resources for the AER without informing the host.
 * Call this function when deleting a qpair to make sure the qpair is completely empty
 * before freeing the request. The AER is freed without sending a completion to prevent
 * the host from sending another AER.
 */
void nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair);

int nvmf_ctrlr_abort_request(struct spdk_nvmf_request *req);

void nvmf_ctrlr_set_fatal_status(struct spdk_nvmf_ctrlr *ctrlr);

static inline struct spdk_nvmf_ns *
_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
	if (spdk_unlikely(nsid - 1 >= subsystem->max_nsid)) {
		return NULL;
	}

	return subsystem->ns[nsid - 1];
}
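
/*
 * A worked example of the bounds check above: for nsid == 0, the unsigned
 * subtraction nsid - 1 wraps to UINT32_MAX, which is always >= max_nsid, so a
 * single comparison rejects both nsid 0 and any nsid beyond max_nsid.  Note
 * that a valid nsid can still map to a NULL slot if no namespace is allocated
 * there, so callers check the result:
 *
 *	struct spdk_nvmf_ns *ns = _nvmf_subsystem_get_ns(subsystem, nsid);
 *	if (ns == NULL) {
 *		... nsid was 0, out of range, or not allocated ...
 *	}
 */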

static inline bool
nvmf_qpair_is_admin_queue(struct spdk_nvmf_qpair *qpair)
{
	return qpair->qid == 0;
}

/**
 * Initiates a zcopy start operation
 *
 * \param bdev The \ref spdk_bdev
 * \param desc The \ref spdk_bdev_desc
 * \param ch The \ref spdk_io_channel
 * \param req The \ref spdk_nvmf_request passed to the bdev for processing
 *
 * \return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE if the command was completed immediately or
 *         SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS if the command was submitted and will be
 *         completed asynchronously.  Asynchronous completions are notified through
 *         spdk_nvmf_request_complete().
 */
int nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
				struct spdk_bdev_desc *desc,
				struct spdk_io_channel *ch,
				struct spdk_nvmf_request *req);
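
/*
 * Illustrative caller sketch for the return contract above (the variables are
 * hypothetical, not taken from this codebase):
 *
 *	rc = nvmf_bdev_ctrlr_zcopy_start(bdev, desc, ch, req);
 *	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
 *		spdk_nvmf_request_complete(req);
 *	}
 *	... for SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS, the completion
 *	    arrives later via spdk_nvmf_request_complete() ...
 */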

/**
 * Ends a zcopy operation
 *
 * \param req The NVMe-oF request
 * \param commit Flag indicating whether the buffers should be committed
 */
void nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit);

#endif /* __NVMF_INTERNAL_H__ */