/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef __NVMF_INTERNAL_H__
#define __NVMF_INTERNAL_H__

#include "spdk/stdinc.h"

#include "spdk/likely.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/nvmf_transport.h"
#include "spdk/nvmf_spec.h"
#include "spdk/assert.h"
#include "spdk/bdev.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/thread.h"
#include "spdk/tree.h"
#include "spdk/bit_array.h"

/* The spec reserves cntlid values in the range FFF0h to FFFFh. */
#define NVMF_MIN_CNTLID 1
#define NVMF_MAX_CNTLID 0xFFEF

enum spdk_nvmf_tgt_state {
	NVMF_TGT_IDLE = 0,
	NVMF_TGT_RUNNING,
	NVMF_TGT_PAUSING,
	NVMF_TGT_PAUSED,
	NVMF_TGT_RESUMING,
};

enum spdk_nvmf_subsystem_state {
	SPDK_NVMF_SUBSYSTEM_INACTIVE = 0,
	SPDK_NVMF_SUBSYSTEM_ACTIVATING,
	SPDK_NVMF_SUBSYSTEM_ACTIVE,
	SPDK_NVMF_SUBSYSTEM_PAUSING,
	SPDK_NVMF_SUBSYSTEM_PAUSED,
	SPDK_NVMF_SUBSYSTEM_RESUMING,
	SPDK_NVMF_SUBSYSTEM_DEACTIVATING,
	SPDK_NVMF_SUBSYSTEM_NUM_STATES,
};
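
/*
 * Expected lifecycle, inferred from the state names (the authoritative state
 * machine lives in the subsystem code):
 *
 *	INACTIVE -> ACTIVATING -> ACTIVE
 *	ACTIVE -> PAUSING -> PAUSED -> RESUMING -> ACTIVE
 *	ACTIVE or PAUSED -> DEACTIVATING -> INACTIVE
 */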

RB_HEAD(subsystem_tree, spdk_nvmf_subsystem);

struct spdk_nvmf_tgt {
	char					name[NVMF_TGT_NAME_MAX_LENGTH];

	pthread_mutex_t				mutex;

	uint64_t				discovery_genctr;

	uint32_t				max_subsystems;

	enum spdk_nvmf_tgt_discovery_filter	discovery_filter;

	enum spdk_nvmf_tgt_state		state;

	struct spdk_bit_array			*subsystem_ids;

	struct subsystem_tree			subsystems;

	TAILQ_HEAD(, spdk_nvmf_transport)	transports;
	TAILQ_HEAD(, spdk_nvmf_poll_group)	poll_groups;
	TAILQ_HEAD(, spdk_nvmf_referral)	referrals;

	/* Used for round-robin assignment of connections to poll groups */
	struct spdk_nvmf_poll_group		*next_poll_group;

	spdk_nvmf_tgt_destroy_done_fn		*destroy_cb_fn;
	void					*destroy_cb_arg;

	uint16_t				crdt[3];
	uint16_t				num_poll_groups;

	TAILQ_ENTRY(spdk_nvmf_tgt)		link;
};

struct spdk_nvmf_host {
	char				nqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	TAILQ_ENTRY(spdk_nvmf_host)	link;
};

struct spdk_nvmf_subsystem_listener {
	struct spdk_nvmf_subsystem			*subsystem;
	spdk_nvmf_tgt_subsystem_listen_done_fn		cb_fn;
	void						*cb_arg;
	struct spdk_nvme_transport_id			*trid;
	struct spdk_nvmf_transport			*transport;
	enum spdk_nvme_ana_state			*ana_state;
	uint64_t					ana_state_change_count;
	uint16_t					id;
	struct spdk_nvmf_listener_opts			opts;
	TAILQ_ENTRY(spdk_nvmf_subsystem_listener)	link;
};

struct spdk_nvmf_referral {
	/* Discovery Log Page Entry for this referral */
	struct spdk_nvmf_discovery_log_page_entry entry;
	/* Transport ID */
	struct spdk_nvme_transport_id trid;
	TAILQ_ENTRY(spdk_nvmf_referral) link;
};

struct spdk_nvmf_subsystem_pg_ns_info {
	struct spdk_io_channel		*channel;
	struct spdk_uuid		uuid;
	/* current reservation key, no reservation if the value is 0 */
	uint64_t			crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type	rtype;
	/* Host ID which holds the reservation */
	struct spdk_uuid		holder_id;
	/* Host IDs of the registrants of this namespace */
	struct spdk_uuid		reg_hostid[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint64_t			num_blocks;

	/* I/O outstanding to this namespace */
	uint64_t			io_outstanding;
	enum spdk_nvmf_subsystem_state	state;
};

typedef void (*spdk_nvmf_poll_group_mod_done)(void *cb_arg, int status);

struct spdk_nvmf_subsystem_poll_group {
	/* Array of namespace information for each namespace indexed by nsid - 1 */
	struct spdk_nvmf_subsystem_pg_ns_info	*ns_info;
	uint32_t				num_ns;
	enum spdk_nvmf_subsystem_state		state;

	/* Number of ADMIN and FABRICS requests outstanding */
	uint64_t				mgmt_io_outstanding;
	spdk_nvmf_poll_group_mod_done		cb_fn;
	void					*cb_arg;

	TAILQ_HEAD(, spdk_nvmf_request)		queued;
};
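
/*
 * A note on pause semantics (an assumption based on the fields above, not a
 * contract): pausing a subsystem on a poll group waits for mgmt_io_outstanding
 * and the paused namespace's io_outstanding to drain to zero before cb_fn is
 * invoked, while requests that arrive in the meantime are parked on 'queued'.
 */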

struct spdk_nvmf_registrant {
	TAILQ_ENTRY(spdk_nvmf_registrant) link;
	struct spdk_uuid hostid;
	/* Registration key */
	uint64_t rkey;
};

struct spdk_nvmf_ns {
	uint32_t nsid;
	uint32_t anagrpid;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_nvmf_ns_opts opts;
	/* reservation notification mask */
	uint32_t mask;
	/* generation code */
	uint32_t gen;
	/* registrants head */
	TAILQ_HEAD(, spdk_nvmf_registrant) registrants;
	/* current reservation key */
	uint64_t crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* current reservation holder; only valid for reservation types that allow a single holder */
	struct spdk_nvmf_registrant *holder;
	/* Persist Through Power Loss file which contains the persistent reservation */
	char *ptpl_file;
	/* Persist Through Power Loss feature is enabled */
	bool ptpl_activated;
	/* ZCOPY is supported by the underlying bdev */
	bool zcopy;
	/* Command Set Identifier */
	enum spdk_nvme_csi csi;
	/* Make namespace visible to controllers of these hosts */
	TAILQ_HEAD(, spdk_nvmf_host) hosts;
	/* Namespace is always visible to all controllers */
	bool always_visible;
};

/*
 * NVMf reservation notification log page.
 */
struct spdk_nvmf_reservation_log {
	struct spdk_nvme_reservation_notification_log	log;
	TAILQ_ENTRY(spdk_nvmf_reservation_log)		link;
	struct spdk_nvmf_ctrlr				*ctrlr;
};

/*
 * NVMf async event completion.
 */
struct spdk_nvmf_async_event_completion {
	union spdk_nvme_async_event_completion		event;
	STAILQ_ENTRY(spdk_nvmf_async_event_completion)	link;
};

/*
 * This structure represents an NVMe-oF controller,
 * which is like a "session" in networking terms.
 */
struct spdk_nvmf_ctrlr {
	uint16_t			cntlid;
	char				hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	struct spdk_nvmf_subsystem	*subsys;
	struct spdk_bit_array		*visible_ns;

	struct spdk_nvmf_ctrlr_data	cdata;

	struct spdk_nvmf_registers	vcprop;

	struct spdk_nvmf_ctrlr_feat	feat;

	struct spdk_nvmf_qpair	*admin_qpair;
	struct spdk_thread	*thread;
	struct spdk_bit_array	*qpair_mask;

	const struct spdk_nvmf_subsystem_listener	*listener;

	struct spdk_nvmf_request *aer_req[SPDK_NVMF_MAX_ASYNC_EVENTS];
	STAILQ_HEAD(, spdk_nvmf_async_event_completion) async_events;
	uint64_t notice_aen_mask;
	uint8_t nr_aer_reqs;
	struct spdk_uuid hostid;

	uint32_t association_timeout; /* in milliseconds */
	uint16_t changed_ns_list_count;
	struct spdk_nvme_ns_list changed_ns_list;
	uint64_t log_page_count;
	uint8_t num_avail_log_pages;
	TAILQ_HEAD(log_page_head, spdk_nvmf_reservation_log) log_head;

	/* Time to trigger keep-alive: poller_time = now_tick + period */
	uint64_t			last_keep_alive_tick;
	struct spdk_poller		*keep_alive_poller;

	struct spdk_poller		*association_timer;

	struct spdk_poller		*cc_timer;
	uint64_t			cc_timeout_tsc;
	struct spdk_poller		*cc_timeout_timer;

	bool				dif_insert_or_strip;
	bool				in_destruct;
	bool				disconnect_in_progress;
	/* valid only when disconnect_in_progress is true */
	bool				disconnect_is_shn;
	bool				acre_enabled;
	bool				dynamic_ctrlr;

	TAILQ_ENTRY(spdk_nvmf_ctrlr)	link;
};

#define NVMF_MAX_LISTENERS_PER_SUBSYSTEM	16

struct nvmf_subsystem_state_change_ctx {
	struct spdk_nvmf_subsystem			*subsystem;
	uint16_t					nsid;

	enum spdk_nvmf_subsystem_state			original_state;
	enum spdk_nvmf_subsystem_state			requested_state;
	int						status;
	struct spdk_thread				*thread;

	spdk_nvmf_subsystem_state_change_done		cb_fn;
	void						*cb_arg;
	TAILQ_ENTRY(nvmf_subsystem_state_change_ctx)	link;
};

struct spdk_nvmf_subsystem {
	struct spdk_thread				*thread;

	uint32_t					id;

	enum spdk_nvmf_subsystem_state			state;
	enum spdk_nvmf_subtype				subtype;

	uint16_t					next_cntlid;
	struct {
		uint8_t					allow_any_host : 1;
		uint8_t					allow_any_listener : 1;
		uint8_t					ana_reporting : 1;
		uint8_t					reserved : 5;
	} flags;

	bool						destroying;
	bool						async_destroy;

	/* FDP related fields */
	bool						fdp_supported;

	/* Zoned storage related fields */
	bool						zone_append_supported;
	uint64_t					max_zone_append_size_kib;

	struct spdk_nvmf_tgt				*tgt;
	RB_ENTRY(spdk_nvmf_subsystem)			link;

	/* Array of pointers to namespaces of size max_nsid indexed by nsid - 1 */
	struct spdk_nvmf_ns				**ns;
	uint32_t					max_nsid;

	uint16_t					min_cntlid;
	uint16_t					max_cntlid;

	uint64_t					max_discard_size_kib;
	uint64_t					max_write_zeroes_size_kib;

	TAILQ_HEAD(, spdk_nvmf_ctrlr)			ctrlrs;

	/* A mutex used to protect the hosts list and allow_any_host flag. Unlike the namespace
	 * array, this list is not used on the I/O path (it's needed for handling things like
	 * the CONNECT command), so use a mutex to protect it instead of requiring the subsystem
	 * state to be paused. This removes the requirement to pause the subsystem when hosts
	 * are added or removed dynamically. */
	pthread_mutex_t					mutex;
	TAILQ_HEAD(, spdk_nvmf_host)			hosts;
	TAILQ_HEAD(, spdk_nvmf_subsystem_listener)	listeners;
	struct spdk_bit_array				*used_listener_ids;

	TAILQ_ENTRY(spdk_nvmf_subsystem)		entries;

	nvmf_subsystem_destroy_cb			async_destroy_cb;
	void						*async_destroy_cb_arg;

	char						sn[SPDK_NVME_CTRLR_SN_LEN + 1];
	char						mn[SPDK_NVME_CTRLR_MN_LEN + 1];
	char						subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];

	/* Array of namespace counts per ANA group, of size max_nsid, indexed by anagrpid - 1.
	 * Sizing it like the namespace array is sufficient, since the number of ANA groups
	 * can never exceed the number of namespaces.
	 */
	uint32_t					*ana_group;
	/* Queue of state change requests */
	TAILQ_HEAD(, nvmf_subsystem_state_change_ctx)	state_changes;
};

static int
subsystem_cmp(struct spdk_nvmf_subsystem *subsystem1, struct spdk_nvmf_subsystem *subsystem2)
{
	return strncmp(subsystem1->subnqn, subsystem2->subnqn, sizeof(subsystem1->subnqn));
}

RB_GENERATE_STATIC(subsystem_tree, spdk_nvmf_subsystem, link, subsystem_cmp);
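
/*
 * Subsystems are kept in a red-black tree keyed by subnqn (compared with
 * subsystem_cmp() above).  A lookup builds a stack-allocated key and searches
 * the tree; a minimal sketch:
 *
 *	struct spdk_nvmf_subsystem key;
 *
 *	snprintf(key.subnqn, sizeof(key.subnqn), "%s", subnqn);
 *	subsystem = RB_FIND(subsystem_tree, &tgt->subsystems, &key);
 */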

int nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem);
int nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
				  struct spdk_nvmf_subsystem *subsystem,
				  spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem,
				     uint32_t nsid,
				     spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);

void nvmf_update_discovery_log(struct spdk_nvmf_tgt *tgt, const char *hostnqn);
void nvmf_get_discovery_log_page(struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
				 uint32_t iovcnt, uint64_t offset, uint32_t length,
				 struct spdk_nvme_transport_id *cmd_source_trid);

void nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req);
int nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req);
bool nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_copy_supported(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid);
bool nvmf_ctrlr_use_zcopy(struct spdk_nvmf_request *req);

void nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
				 bool dif_insert_or_strip);
int nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req);
int nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_copy_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
bool nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
				 struct spdk_dif_ctx *dif_ctx);
bool nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev);

int nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
			     struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
				 struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem,
		bool stop);
struct spdk_nvmf_ctrlr *nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		uint16_t cntlid);
struct spdk_nvmf_subsystem_listener *nvmf_subsystem_find_listener(
	struct spdk_nvmf_subsystem *subsystem,
	const struct spdk_nvme_transport_id *trid);
struct spdk_nvmf_listener *nvmf_transport_find_listener(
	struct spdk_nvmf_transport *transport,
	const struct spdk_nvme_transport_id *trid);
void nvmf_transport_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w,
			      bool named);
void nvmf_transport_listen_dump_trid(const struct spdk_nvme_transport_id *trid,
				     struct spdk_json_write_ctx *w);

/**
 * Sets the controller ID range for a subsystem.
 * Valid range is [1, 0xFFEF].
 *
 * May only be performed on subsystems in the INACTIVE state.
 *
 * \param subsystem Subsystem to modify.
 * \param min_cntlid Minimum controller ID.
 * \param max_cntlid Maximum controller ID.
 *
 * \return 0 on success, or negated errno value on failure.
 */
int nvmf_subsystem_set_cntlid_range(struct spdk_nvmf_subsystem *subsystem,
				    uint16_t min_cntlid, uint16_t max_cntlid);
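
/*
 * For example (hypothetical values), restricting an inactive subsystem to
 * controller IDs 1 through 100:
 *
 *	rc = nvmf_subsystem_set_cntlid_range(subsystem, 1, 100);
 */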

int nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_async_event_discovery_log_change_notice(void *ctx);
void nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr);

void nvmf_ns_reservation_request(void *ctx);
void nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
				       struct spdk_nvmf_ns *ns,
				       enum spdk_nvme_reservation_notification_log_page_type type);

bool nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns);

static inline struct spdk_nvmf_host *
nvmf_ns_find_host(struct spdk_nvmf_ns *ns, const char *hostnqn)
{
	struct spdk_nvmf_host *host = NULL;

	TAILQ_FOREACH(host, &ns->hosts, link) {
		if (strcmp(hostnqn, host->nqn) == 0) {
			return host;
		}
	}

	return NULL;
}

/*
 * Abort zero-copy requests that already got the buffer (received zcopy_start cb), but haven't
 * started zcopy_end.  These requests are kept on the outstanding queue, but are not waiting for a
 * completion from the bdev layer, so, when a qpair is being disconnected, we need to kick them to
 * force their completion.
 */
void nvmf_qpair_abort_pending_zcopy_reqs(struct spdk_nvmf_qpair *qpair);

/*
 * Frees the transport (e.g. RDMA) resources for an AER without informing the host.
 * This function should be called when deleting a qpair to make sure the qpair is
 * completely empty before freeing the request. We free the AER without sending a
 * completion to prevent the host from submitting another AER.
 */
void nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair);

int nvmf_ctrlr_abort_request(struct spdk_nvmf_request *req);

void nvmf_ctrlr_set_fatal_status(struct spdk_nvmf_ctrlr *ctrlr);

static inline bool
nvmf_ctrlr_ns_is_visible(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
{
	return spdk_bit_array_get(ctrlr->visible_ns, nsid - 1);
}

static inline struct spdk_nvmf_ns *
_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
	if (spdk_unlikely(nsid - 1 >= subsystem->max_nsid)) {
		return NULL;
	}

	return subsystem->ns[nsid - 1];
}

static inline struct spdk_nvmf_ns *
nvmf_ctrlr_get_ns(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
{
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvmf_ns *ns = _nvmf_subsystem_get_ns(subsystem, nsid);

	return ns && nvmf_ctrlr_ns_is_visible(ctrlr, nsid) ? ns : NULL;
}

static inline bool
nvmf_qpair_is_admin_queue(struct spdk_nvmf_qpair *qpair)
{
	return qpair->qid == 0;
}

static inline bool
nvmf_request_is_fabric_connect(struct spdk_nvmf_request *req)
{
	return req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC &&
	       req->cmd->nvmf_cmd.fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT;
}

/*
 * Tests whether a given string represents a valid NQN.
 */
bool nvmf_nqn_is_valid(const char *nqn);

/*
 * Tests whether a given NQN describes a discovery subsystem.
 */
bool nvmf_nqn_is_discovery(const char *nqn);

/**
 * Initiates a zcopy start operation
 *
 * \param bdev The \ref spdk_bdev
 * \param desc The \ref spdk_bdev_desc
 * \param ch The \ref spdk_io_channel
 * \param req The \ref spdk_nvmf_request passed to the bdev for processing
 *
 * \return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE if the command was completed immediately or
 *         SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS if the command was submitted and will be
 *         completed asynchronously.  Asynchronous completions are notified through
 *         spdk_nvmf_request_complete().
 */
int nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
				struct spdk_bdev_desc *desc,
				struct spdk_io_channel *ch,
				struct spdk_nvmf_request *req);

/**
 * Ends a zcopy operation
 *
 * \param req The NVMe-oF request
 * \param commit Flag indicating whether the buffers should be committed
 */
void nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit);
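
/*
 * A rough sketch of how a transport might drive the zcopy pair above (assumed
 * usage; the real call sites live in the individual transports):
 *
 *	rc = nvmf_bdev_ctrlr_zcopy_start(bdev, desc, ch, req);
 *	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS) {
 *		// completion arrives later via spdk_nvmf_request_complete()
 *	}
 *	// ... data moves directly through the bdev-owned buffers ...
 *	nvmf_bdev_ctrlr_zcopy_end(req, true);	// commit == true persists writes
 */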

#endif /* __NVMF_INTERNAL_H__ */