/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __NVMF_INTERNAL_H__
#define __NVMF_INTERNAL_H__

#include "spdk/stdinc.h"

#include "spdk/likely.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/nvmf_transport.h"
#include "spdk/nvmf_spec.h"
#include "spdk/assert.h"
#include "spdk/bdev.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/thread.h"

#define NVMF_MAX_ASYNC_EVENTS	(4)

/* The spec reserves cntlid values in the range FFF0h to FFFFh. */
#define NVMF_MIN_CNTLID 1
#define NVMF_MAX_CNTLID 0xFFEF

enum spdk_nvmf_subsystem_state {
	SPDK_NVMF_SUBSYSTEM_INACTIVE = 0,
	SPDK_NVMF_SUBSYSTEM_ACTIVATING,
	SPDK_NVMF_SUBSYSTEM_ACTIVE,
	SPDK_NVMF_SUBSYSTEM_PAUSING,
	SPDK_NVMF_SUBSYSTEM_PAUSED,
	SPDK_NVMF_SUBSYSTEM_RESUMING,
	SPDK_NVMF_SUBSYSTEM_DEACTIVATING,
	SPDK_NVMF_SUBSYSTEM_NUM_STATES,
};
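
/*
 * Typical lifecycle suggested by the states above (the authoritative
 * transition logic lives in the subsystem state machine in
 * lib/nvmf/subsystem.c):
 *
 *   INACTIVE -> ACTIVATING -> ACTIVE -> PAUSING -> PAUSED -> RESUMING -> ACTIVE
 *   ACTIVE or PAUSED -> DEACTIVATING -> INACTIVE
 */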

struct spdk_nvmf_tgt {
	char					name[NVMF_TGT_NAME_MAX_LENGTH];

	pthread_mutex_t				mutex;

	uint64_t				discovery_genctr;

	struct spdk_poller			*accept_poller;

	uint32_t				max_subsystems;

	enum spdk_nvmf_tgt_discovery_filter	discovery_filter;

	/* Array of subsystem pointers of size max_subsystems indexed by sid */
	struct spdk_nvmf_subsystem		**subsystems;

	TAILQ_HEAD(, spdk_nvmf_transport)	transports;
	TAILQ_HEAD(, spdk_nvmf_poll_group)	poll_groups;

	/* Used for round-robin assignment of connections to poll groups */
	struct spdk_nvmf_poll_group		*next_poll_group;

	spdk_nvmf_tgt_destroy_done_fn		*destroy_cb_fn;
	void					*destroy_cb_arg;

	uint16_t				crdt[3];

	TAILQ_ENTRY(spdk_nvmf_tgt)		link;
};
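
/*
 * For illustration only: round-robin assignment via next_poll_group might
 * look like the sketch below. The actual selection logic lives in
 * lib/nvmf/nvmf.c and may also consult the transport for an optimal group.
 *
 *   pg = tgt->next_poll_group;
 *   tgt->next_poll_group = TAILQ_NEXT(pg, link);
 *   if (tgt->next_poll_group == NULL) {
 *           tgt->next_poll_group = TAILQ_FIRST(&tgt->poll_groups);
 *   }
 */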

struct spdk_nvmf_host {
	char				nqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	TAILQ_ENTRY(spdk_nvmf_host)	link;
};

struct spdk_nvmf_subsystem_listener {
	struct spdk_nvmf_subsystem			*subsystem;
	spdk_nvmf_tgt_subsystem_listen_done_fn		cb_fn;
	void						*cb_arg;
	struct spdk_nvme_transport_id			*trid;
	struct spdk_nvmf_transport			*transport;
	enum spdk_nvme_ana_state			*ana_state;
	uint64_t					ana_state_change_count;
	TAILQ_ENTRY(spdk_nvmf_subsystem_listener)	link;
};

/* Maximum number of registrants supported per namespace */
#define SPDK_NVMF_MAX_NUM_REGISTRANTS		16

struct spdk_nvmf_registrant_info {
	uint64_t		rkey;
	char			host_uuid[SPDK_UUID_STRING_LEN];
};

struct spdk_nvmf_reservation_info {
	bool					ptpl_activated;
	enum spdk_nvme_reservation_type		rtype;
	uint64_t				crkey;
	char					bdev_uuid[SPDK_UUID_STRING_LEN];
	char					holder_uuid[SPDK_UUID_STRING_LEN];
	uint32_t				num_regs;
	struct spdk_nvmf_registrant_info	registrants[SPDK_NVMF_MAX_NUM_REGISTRANTS];
};

struct spdk_nvmf_subsystem_pg_ns_info {
	struct spdk_io_channel		*channel;
	struct spdk_uuid		uuid;
	/* current reservation key, no reservation if the value is 0 */
	uint64_t			crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type	rtype;
	/* Host ID which holds the reservation */
	struct spdk_uuid		holder_id;
	/* Host IDs of the registrants associated with this namespace */
	struct spdk_uuid		reg_hostid[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint64_t			num_blocks;

	/* I/O outstanding to this namespace */
	uint64_t			io_outstanding;
	enum spdk_nvmf_subsystem_state	state;
};

typedef void (*spdk_nvmf_poll_group_mod_done)(void *cb_arg, int status);

struct spdk_nvmf_subsystem_poll_group {
	/* Array of namespace information for each namespace indexed by nsid - 1 */
	struct spdk_nvmf_subsystem_pg_ns_info	*ns_info;
	uint32_t				num_ns;

	/* Number of ADMIN and FABRICS requests outstanding */
	uint64_t				mgmt_io_outstanding;
	spdk_nvmf_poll_group_mod_done		cb_fn;
	void					*cb_arg;

	enum spdk_nvmf_subsystem_state		state;

	TAILQ_HEAD(, spdk_nvmf_request)		queued;
};

struct spdk_nvmf_registrant {
	TAILQ_ENTRY(spdk_nvmf_registrant) link;
	struct spdk_uuid hostid;
	/* Registration key */
	uint64_t rkey;
};

struct spdk_nvmf_ns {
	uint32_t nsid;
	uint32_t anagrpid;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_nvmf_ns_opts opts;
	/* reservation notification mask */
	uint32_t mask;
	/* generation code */
	uint32_t gen;
	/* registrants head */
	TAILQ_HEAD(, spdk_nvmf_registrant) registrants;
	/* current reservation key */
	uint64_t crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* Current reservation holder; valid only for reservation types that permit a single holder */
	struct spdk_nvmf_registrant *holder;
	/* Persist Through Power Loss file which contains the persistent reservation */
	char *ptpl_file;
	/* Persist Through Power Loss feature is enabled */
	bool ptpl_activated;
	/* ZCOPY supported on bdev device */
	bool zcopy;
};

struct spdk_nvmf_ctrlr_feat {
	union spdk_nvme_feat_arbitration arbitration;
	union spdk_nvme_feat_power_management power_management;
	union spdk_nvme_feat_error_recovery error_recovery;
	union spdk_nvme_feat_volatile_write_cache volatile_write_cache;
	union spdk_nvme_feat_number_of_queues number_of_queues;
	union spdk_nvme_feat_interrupt_coalescing interrupt_coalescing;
	union spdk_nvme_feat_interrupt_vector_configuration interrupt_vector_configuration;
	union spdk_nvme_feat_write_atomicity write_atomicity;
	union spdk_nvme_feat_async_event_configuration async_event_configuration;
	union spdk_nvme_feat_keep_alive_timer keep_alive_timer;
};

/*
 * NVMf reservation notification log page.
 */
struct spdk_nvmf_reservation_log {
	struct spdk_nvme_reservation_notification_log	log;
	TAILQ_ENTRY(spdk_nvmf_reservation_log)		link;
	struct spdk_nvmf_ctrlr				*ctrlr;
};

/*
 * NVMf async event completion.
 */
struct spdk_nvmf_async_event_completion {
	union spdk_nvme_async_event_completion		event;
	STAILQ_ENTRY(spdk_nvmf_async_event_completion)	link;
};

/*
 * This structure represents an NVMe-oF controller,
 * which is like a "session" in networking terms.
 */
struct spdk_nvmf_ctrlr {
	uint16_t			cntlid;
	char				hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	struct spdk_nvmf_subsystem	*subsys;

	struct spdk_nvmf_ctrlr_data	cdata;

	struct spdk_nvmf_registers	vcprop;

	struct spdk_nvmf_ctrlr_feat feat;

	struct spdk_nvmf_qpair	*admin_qpair;
	struct spdk_thread	*thread;
	struct spdk_bit_array	*qpair_mask;

	const struct spdk_nvmf_subsystem_listener	*listener;

	struct spdk_nvmf_request *aer_req[NVMF_MAX_ASYNC_EVENTS];
	STAILQ_HEAD(, spdk_nvmf_async_event_completion) async_events;
	uint64_t notice_aen_mask;
	uint8_t nr_aer_reqs;
	struct spdk_uuid hostid;

	uint32_t association_timeout; /* in milliseconds */
	uint16_t changed_ns_list_count;
	struct spdk_nvme_ns_list changed_ns_list;
	uint64_t log_page_count;
	uint8_t num_avail_log_pages;
	TAILQ_HEAD(log_page_head, spdk_nvmf_reservation_log) log_head;

	/* Time to trigger keep-alive: poller_time = now_tick + period */
	uint64_t			last_keep_alive_tick;
	struct spdk_poller		*keep_alive_poller;

	struct spdk_poller		*association_timer;

	struct spdk_poller		*cc_timer;

	bool				dif_insert_or_strip;
	bool				in_destruct;
	bool				disconnect_in_progress;
	/* valid only when disconnect_in_progress is true */
	bool				disconnect_is_shn;
	bool				acre_enabled;

	TAILQ_ENTRY(spdk_nvmf_ctrlr)	link;
};

struct spdk_nvmf_subsystem {
	struct spdk_thread				*thread;

	uint32_t					id;

	enum spdk_nvmf_subsystem_state			state;
	enum spdk_nvmf_subtype				subtype;

	uint16_t					next_cntlid;
	struct {
		uint8_t					allow_any_host : 1;
		uint8_t					allow_any_listener : 1;
		uint8_t					ana_reporting : 1;
		uint8_t					reserved : 5;
	} flags;

	/* boolean for state change synchronization */
	bool						changing_state;

	bool						destroying;
	bool						async_destroy;

	struct spdk_nvmf_tgt				*tgt;

	/* Array of pointers to namespaces of size max_nsid indexed by nsid - 1 */
	struct spdk_nvmf_ns				**ns;
	uint32_t					max_nsid;

	uint16_t					min_cntlid;
	uint16_t					max_cntlid;

	TAILQ_HEAD(, spdk_nvmf_ctrlr)			ctrlrs;

	/* A mutex used to protect the hosts list and allow_any_host flag. Unlike the namespace
	 * array, this list is not used on the I/O path (it's needed for handling things like
	 * the CONNECT command), so use a mutex to protect it instead of requiring the subsystem
	 * state to be paused. This removes the requirement to pause the subsystem when hosts
	 * are added or removed dynamically. */
	pthread_mutex_t					mutex;
	TAILQ_HEAD(, spdk_nvmf_host)			hosts;
	TAILQ_HEAD(, spdk_nvmf_subsystem_listener)	listeners;

	TAILQ_ENTRY(spdk_nvmf_subsystem)		entries;

	nvmf_subsystem_destroy_cb			async_destroy_cb;
	void						*async_destroy_cb_arg;

	char						sn[SPDK_NVME_CTRLR_SN_LEN + 1];
	char						mn[SPDK_NVME_CTRLR_MN_LEN + 1];
	char						subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];

	/* Array of per-ANA-group namespace counts, of size max_nsid and indexed by anagrpid - 1.
	 * Sizing this array the same as the namespace array is sufficient, because valid
	 * anagrpid values never exceed max_nsid. For example, the namespace count for
	 * anagrpid 1 is stored in ana_group[0].
	 */
	uint32_t					*ana_group;
};

int nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
				  struct spdk_nvmf_transport *transport);
int nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem);
int nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
				  struct spdk_nvmf_subsystem *subsystem,
				  spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem,
				     uint32_t nsid,
				     spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);

void nvmf_update_discovery_log(struct spdk_nvmf_tgt *tgt, const char *hostnqn);
void nvmf_get_discovery_log_page(struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
				 uint32_t iovcnt, uint64_t offset, uint32_t length,
				 struct spdk_nvme_transport_id *cmd_source_trid);

void nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req);
int nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req);
bool nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid);
bool nvmf_ctrlr_use_zcopy(struct spdk_nvmf_request *req);

void nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
				 bool dif_insert_or_strip);
int nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req);
int nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
bool nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
				 struct spdk_dif_ctx *dif_ctx);
bool nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev);

int nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
			     struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
				 struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem,
		bool stop);
struct spdk_nvmf_ctrlr *nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		uint16_t cntlid);
struct spdk_nvmf_subsystem_listener *nvmf_subsystem_find_listener(
	struct spdk_nvmf_subsystem *subsystem,
	const struct spdk_nvme_transport_id *trid);
struct spdk_nvmf_listener *nvmf_transport_find_listener(
	struct spdk_nvmf_transport *transport,
	const struct spdk_nvme_transport_id *trid);
void nvmf_transport_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w,
			      bool named);
void nvmf_transport_listen_dump_opts(struct spdk_nvmf_transport *transport,
				     const struct spdk_nvme_transport_id *trid, struct spdk_json_write_ctx *w);
void nvmf_subsystem_set_ana_state(struct spdk_nvmf_subsystem *subsystem,
				  const struct spdk_nvme_transport_id *trid,
				  enum spdk_nvme_ana_state ana_state, uint32_t anagrpid,
				  spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, void *cb_arg);
bool nvmf_subsystem_get_ana_reporting(struct spdk_nvmf_subsystem *subsystem);

/**
 * Sets the controller ID range for a subsystem.
 * Valid range is [1, 0xFFEF].
 *
 * May only be performed on subsystems in the INACTIVE state.
 *
 * \param subsystem Subsystem to modify.
 * \param min_cntlid Minimum controller ID.
 * \param max_cntlid Maximum controller ID.
 *
 * \return 0 on success, or negated errno value on failure.
 */
int nvmf_subsystem_set_cntlid_range(struct spdk_nvmf_subsystem *subsystem,
				    uint16_t min_cntlid, uint16_t max_cntlid);
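
/*
 * Example (illustrative): restrict controllers of an inactive subsystem to
 * IDs 1 through 100.
 *
 *   rc = nvmf_subsystem_set_cntlid_range(subsystem, 1, 100);
 *   if (rc != 0) {
 *           // range was invalid or the subsystem was not INACTIVE
 *   }
 */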

int nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_async_event_discovery_log_change_notice(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_async_event_error_event(struct spdk_nvmf_ctrlr *ctrlr,
				       union spdk_nvme_async_event_completion event);
void nvmf_ns_reservation_request(void *ctx);
void nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
				       struct spdk_nvmf_ns *ns,
				       enum spdk_nvme_reservation_notification_log_page_type type);

/*
 * Aborting an AER is done on a per-controller basis and sends a completion
 * for the AER to the host. Call this function when recovering in error paths
 * where it is OK for the host to send a subsequent AER.
 */
void nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr);

/*
 * Freeing an AER simply releases the RDMA resources for the AER without
 * informing the host. Call this function when deleting a qpair to make sure
 * the qpair is completely empty before freeing the request. The AER is freed
 * without sending a completion to prevent the host from sending another AER.
 */
void nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair);

int nvmf_ctrlr_abort_request(struct spdk_nvmf_request *req);

static inline struct spdk_nvmf_ns *
_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
	if (spdk_unlikely(nsid - 1 >= subsystem->max_nsid)) {
		return NULL;
	}

	return subsystem->ns[nsid - 1];
}
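
/*
 * Worked example of the wraparound check above: for nsid == 0, the unsigned
 * subtraction 0 - 1 yields UINT32_MAX, which is always >= max_nsid, so NULL
 * is returned. For a subsystem with max_nsid == 4, valid nsids 1..4 map to
 * array indices 0..3, while nsid == 5 fails the bounds check.
 */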

static inline bool
nvmf_qpair_is_admin_queue(struct spdk_nvmf_qpair *qpair)
{
	return qpair->qid == 0;
}

/**
 * Initiates a zcopy start operation
 *
 * \param bdev The \ref spdk_bdev
 * \param desc The \ref spdk_bdev_desc
 * \param ch The \ref spdk_io_channel
 * \param req The \ref spdk_nvmf_request passed to the bdev for processing
 *
 * \return 0 upon success
 * \return <0 if the zcopy operation could not be started
 */
int nvmf_bdev_ctrlr_start_zcopy(struct spdk_bdev *bdev,
				struct spdk_bdev_desc *desc,
				struct spdk_io_channel *ch,
				struct spdk_nvmf_request *req);

/**
 * Ends a zcopy operation
 *
 * \param req The NVMe-oF request
 * \param commit Flag indicating whether the buffers should be committed
 *
 * \return 0 upon success
 * \return <0 on error
 */
int nvmf_bdev_ctrlr_end_zcopy(struct spdk_nvmf_request *req, bool commit);
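
/*
 * Typical zcopy flow (an illustrative sketch based on the declarations above;
 * error handling omitted): start a zcopy to obtain bdev-owned buffers, let
 * the transport move data in or out of them, then end the zcopy, committing
 * the data for writes.
 *
 *   if (nvmf_bdev_ctrlr_start_zcopy(bdev, desc, ch, req) == 0) {
 *           // ...transport transfers data using the buffers attached to req...
 *           nvmf_bdev_ctrlr_end_zcopy(req, commit);
 *   }
 */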

#endif /* __NVMF_INTERNAL_H__ */