/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef __NVMF_INTERNAL_H__
#define __NVMF_INTERNAL_H__

#include "spdk/stdinc.h"

#include "spdk/keyring.h"
#include "spdk/likely.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/nvmf_transport.h"
#include "spdk/nvmf_spec.h"
#include "spdk/assert.h"
#include "spdk/bdev.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/thread.h"
#include "spdk/tree.h"
#include "spdk/bit_array.h"

/* The spec reserves cntlid values in the range FFF0h to FFFFh. */
#define NVMF_MIN_CNTLID 1
#define NVMF_MAX_CNTLID 0xFFEF

enum spdk_nvmf_tgt_state {
	NVMF_TGT_IDLE = 0,
	NVMF_TGT_RUNNING,
	NVMF_TGT_PAUSING,
	NVMF_TGT_PAUSED,
	NVMF_TGT_RESUMING,
};

enum spdk_nvmf_subsystem_state {
	SPDK_NVMF_SUBSYSTEM_INACTIVE = 0,
	SPDK_NVMF_SUBSYSTEM_ACTIVATING,
	SPDK_NVMF_SUBSYSTEM_ACTIVE,
	SPDK_NVMF_SUBSYSTEM_PAUSING,
	SPDK_NVMF_SUBSYSTEM_PAUSED,
	SPDK_NVMF_SUBSYSTEM_RESUMING,
	SPDK_NVMF_SUBSYSTEM_DEACTIVATING,
	SPDK_NVMF_SUBSYSTEM_NUM_STATES,
};

RB_HEAD(subsystem_tree, spdk_nvmf_subsystem);

struct spdk_nvmf_tgt {
	char					name[NVMF_TGT_NAME_MAX_LENGTH];

	pthread_mutex_t				mutex;

	uint64_t				discovery_genctr;

	uint32_t				max_subsystems;

	uint32_t				discovery_filter;

	enum spdk_nvmf_tgt_state		state;

	struct spdk_bit_array			*subsystem_ids;

	struct subsystem_tree			subsystems;

	TAILQ_HEAD(, spdk_nvmf_transport)	transports;
	TAILQ_HEAD(, spdk_nvmf_poll_group)	poll_groups;
	TAILQ_HEAD(, spdk_nvmf_referral)	referrals;

	/* Used for round-robin assignment of connections to poll groups */
	struct spdk_nvmf_poll_group		*next_poll_group;

	spdk_nvmf_tgt_destroy_done_fn		*destroy_cb_fn;
	void					*destroy_cb_arg;

	uint16_t				crdt[3];
	uint16_t				num_poll_groups;

	/* Allowed DH-HMAC-CHAP digests/dhgroups */
	uint32_t				dhchap_digests;
	uint32_t				dhchap_dhgroups;

	TAILQ_ENTRY(spdk_nvmf_tgt)		link;
};

struct spdk_nvmf_host {
	char				nqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	struct spdk_key			*dhchap_key;
	struct spdk_key			*dhchap_ctrlr_key;
	TAILQ_ENTRY(spdk_nvmf_host)	link;
};

struct spdk_nvmf_subsystem_listener {
	struct spdk_nvmf_subsystem			*subsystem;
	spdk_nvmf_tgt_subsystem_listen_done_fn		cb_fn;
	void						*cb_arg;
	struct spdk_nvme_transport_id			*trid;
	struct spdk_nvmf_transport			*transport;
	enum spdk_nvme_ana_state			*ana_state;
	uint64_t					ana_state_change_count;
	uint16_t					id;
	struct spdk_nvmf_listener_opts			opts;
	TAILQ_ENTRY(spdk_nvmf_subsystem_listener)	link;
};

struct spdk_nvmf_referral {
	/* Discovery Log Page Entry for this referral */
	struct spdk_nvmf_discovery_log_page_entry entry;
	/* Transport ID */
	struct spdk_nvme_transport_id trid;
	TAILQ_ENTRY(spdk_nvmf_referral) link;
};

struct spdk_nvmf_subsystem_pg_ns_info {
	struct spdk_io_channel		*channel;
	struct spdk_uuid		uuid;
	/* Current reservation key; zero means no reservation */
	uint64_t			crkey;
	/* Reservation type */
	enum spdk_nvme_reservation_type	rtype;
	/* Host ID which holds the reservation */
	struct spdk_uuid		holder_id;
	/* Host IDs of the registrants of the namespace */
	struct spdk_uuid		reg_hostid[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint64_t			num_blocks;
	uint32_t			anagrpid;

	/* I/O outstanding to this namespace */
	uint64_t			io_outstanding;
	enum spdk_nvmf_subsystem_state	state;
};

typedef void (*spdk_nvmf_poll_group_mod_done)(void *cb_arg, int status);

struct spdk_nvmf_subsystem_poll_group {
	/* Array of namespace information for each namespace indexed by nsid - 1 */
	struct spdk_nvmf_subsystem_pg_ns_info	*ns_info;
	uint32_t				num_ns;
	enum spdk_nvmf_subsystem_state		state;

	/* Number of ADMIN and FABRICS requests outstanding */
	uint64_t				mgmt_io_outstanding;
	spdk_nvmf_poll_group_mod_done		cb_fn;
	void					*cb_arg;

	TAILQ_HEAD(, spdk_nvmf_request)		queued;
};

struct spdk_nvmf_registrant {
	TAILQ_ENTRY(spdk_nvmf_registrant) link;
	struct spdk_uuid hostid;
	/* Registration key */
	uint64_t rkey;
};

struct spdk_nvmf_ns {
	uint32_t nsid;
	uint32_t anagrpid;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_nvmf_ns_opts opts;
	/* Reservation notification mask */
	uint32_t mask;
	/* Generation code */
	uint32_t gen;
	/* Registrants head */
	TAILQ_HEAD(, spdk_nvmf_registrant) registrants;
	/* Current reservation key */
	uint64_t crkey;
	/* Reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* Current reservation holder; only valid for reservation types that allow a single holder */
	struct spdk_nvmf_registrant *holder;
	/* Persist Through Power Loss file which contains the persistent reservation */
	char *ptpl_file;
	/* Persist Through Power Loss feature is enabled */
	bool ptpl_activated;
	/* ZCOPY supported on bdev device */
	bool zcopy;
	/* Command Set Identifier */
	enum spdk_nvme_csi csi;
	/* Make namespace visible to controllers of these hosts */
	TAILQ_HEAD(, spdk_nvmf_host) hosts;
	/* Namespace is always visible to all controllers */
	bool always_visible;
	/* Namespace ID of the underlying device, used for passthrough commands */
	uint32_t passthru_nsid;
};

/*
 * NVMf reservation notification log page.
 */
struct spdk_nvmf_reservation_log {
	struct spdk_nvme_reservation_notification_log	log;
	TAILQ_ENTRY(spdk_nvmf_reservation_log)		link;
	struct spdk_nvmf_ctrlr				*ctrlr;
};

/*
 * NVMf async event completion.
 */
struct spdk_nvmf_async_event_completion {
	union spdk_nvme_async_event_completion		event;
	STAILQ_ENTRY(spdk_nvmf_async_event_completion)	link;
};

/*
 * This structure represents an NVMe-oF controller,
 * which is like a "session" in networking terms.
 */
struct spdk_nvmf_ctrlr {
	uint16_t			cntlid;
	char				hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	struct spdk_nvmf_subsystem	*subsys;
	struct spdk_bit_array		*visible_ns;

	struct spdk_nvmf_ctrlr_data	cdata;

	struct spdk_nvmf_registers	vcprop;

	struct spdk_nvmf_ctrlr_feat	feat;

	struct spdk_nvmf_qpair		*admin_qpair;
	struct spdk_thread		*thread;
	struct spdk_bit_array		*qpair_mask;

	const struct spdk_nvmf_subsystem_listener	*listener;

	struct spdk_nvmf_request *aer_req[SPDK_NVMF_MAX_ASYNC_EVENTS];
	STAILQ_HEAD(, spdk_nvmf_async_event_completion) async_events;
	uint64_t notice_aen_mask;
	uint8_t nr_aer_reqs;
	struct spdk_uuid hostid;

	uint32_t association_timeout; /* in milliseconds */
	uint16_t changed_ns_list_count;
	struct spdk_nvme_ns_list changed_ns_list;
	uint64_t log_page_count;
	uint8_t num_avail_log_pages;
	TAILQ_HEAD(log_page_head, spdk_nvmf_reservation_log) log_head;

	/* Time to trigger keep-alive: poller_time = now_tick + period */
	uint64_t			last_keep_alive_tick;
	struct spdk_poller		*keep_alive_poller;

	struct spdk_poller		*association_timer;

	struct spdk_poller		*cc_timer;
	uint64_t			cc_timeout_tsc;
	struct spdk_poller		*cc_timeout_timer;

	bool				dif_insert_or_strip;
	bool				in_destruct;
	bool				disconnect_in_progress;
	/* Valid only when disconnect_in_progress is true */
	bool				disconnect_is_shn;
	bool				acre_enabled;
	bool				dynamic_ctrlr;
	/* LBA Format Extension Enabled (LBAFEE) */
	bool				lbafee_enabled;

	TAILQ_ENTRY(spdk_nvmf_ctrlr)	link;
};

#define NVMF_MAX_LISTENERS_PER_SUBSYSTEM	16

struct nvmf_subsystem_state_change_ctx {
	struct spdk_nvmf_subsystem			*subsystem;
	uint16_t					nsid;

	enum spdk_nvmf_subsystem_state			original_state;
	enum spdk_nvmf_subsystem_state			requested_state;
	int						status;
	struct spdk_thread				*thread;

	spdk_nvmf_subsystem_state_change_done		cb_fn;
	void						*cb_arg;
	TAILQ_ENTRY(nvmf_subsystem_state_change_ctx)	link;
};

struct spdk_nvmf_subsystem {
	struct spdk_thread				*thread;

	uint32_t					id;

	enum spdk_nvmf_subsystem_state			state;
	enum spdk_nvmf_subtype				subtype;

	uint16_t					next_cntlid;
	struct {
		uint8_t					allow_any_listener : 1;
		uint8_t					ana_reporting : 1;
		uint8_t					reserved : 6;
	} flags;

	/* Protected against concurrent access by ->mutex */
	bool						allow_any_host;

	bool						destroying;
	bool						async_destroy;

	/* FDP related fields */
	bool						fdp_supported;

	/* Zoned storage related fields */
	uint64_t					max_zone_append_size_kib;

	struct spdk_nvmf_tgt				*tgt;
	RB_ENTRY(spdk_nvmf_subsystem)			link;

	/* Array of pointers to namespaces of size max_nsid indexed by nsid - 1 */
	struct spdk_nvmf_ns				**ns;
	uint32_t					max_nsid;

	uint16_t					min_cntlid;
	uint16_t					max_cntlid;

	uint64_t					max_discard_size_kib;
	uint64_t					max_write_zeroes_size_kib;

	TAILQ_HEAD(, spdk_nvmf_ctrlr)			ctrlrs;

	/* This mutex is used to protect fields that aren't touched on the I/O path (e.g. it's
	 * needed for handling things like the CONNECT command) instead of requiring the subsystem
	 * to be paused.  It makes it possible to modify those fields (e.g. add/remove hosts)
	 * without affecting outstanding I/O requests.
	 */
	pthread_mutex_t					mutex;
	/* Protected against concurrent access by ->mutex */
	TAILQ_HEAD(, spdk_nvmf_host)			hosts;
	TAILQ_HEAD(, spdk_nvmf_subsystem_listener)	listeners;
	struct spdk_bit_array				*used_listener_ids;

	TAILQ_ENTRY(spdk_nvmf_subsystem)		entries;

	nvmf_subsystem_destroy_cb			async_destroy_cb;
	void						*async_destroy_cb_arg;

	char						sn[SPDK_NVME_CTRLR_SN_LEN + 1];
	char						mn[SPDK_NVME_CTRLR_MN_LEN + 1];
	char						subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];

	/* Array of per-ANA-group namespace counts, of size max_nsid, indexed by anagrpid - 1.
	 * Using the same size as the namespace array is sufficient for ANA groups.
	 */
	uint32_t					*ana_group;
	/* Queue of state change requests */
	TAILQ_HEAD(, nvmf_subsystem_state_change_ctx)	state_changes;
	/* In-band authentication sequence number, protected by ->mutex */
	uint32_t					auth_seqnum;
	bool						passthrough;
};
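
/*
 * Illustrative sketch (not part of the API): the mutex-protected fields above are
 * modified by taking ->mutex rather than pausing the subsystem.  Assuming a fully
 * constructed "host" entry, adding it could look like:
 *
 *	pthread_mutex_lock(&subsystem->mutex);
 *	TAILQ_INSERT_HEAD(&subsystem->hosts, host, link);
 *	pthread_mutex_unlock(&subsystem->mutex);
 */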

static int
subsystem_cmp(struct spdk_nvmf_subsystem *subsystem1, struct spdk_nvmf_subsystem *subsystem2)
{
	return strncmp(subsystem1->subnqn, subsystem2->subnqn, sizeof(subsystem1->subnqn));
}

RB_GENERATE_STATIC(subsystem_tree, spdk_nvmf_subsystem, link, subsystem_cmp);
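
/*
 * Subsystems are kept in the tree ordered by subnqn, so a lookup (sketch only;
 * "tgt" and "subnqn" are assumed to be caller-provided) fills a stack key and
 * calls the generated RB_FIND:
 *
 *	struct spdk_nvmf_subsystem key = {};
 *
 *	snprintf(key.subnqn, sizeof(key.subnqn), "%s", subnqn);
 *	subsystem = RB_FIND(subsystem_tree, &tgt->subsystems, &key);
 */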

int nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem);
int nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
				  struct spdk_nvmf_subsystem *subsystem,
				  spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem,
				     uint32_t nsid,
				     spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);

void nvmf_get_discovery_log_page(struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
				 uint32_t iovcnt, uint64_t offset, uint32_t length,
				 struct spdk_nvme_transport_id *cmd_source_trid);

void nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req);
int nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req);
bool nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_copy_supported(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid);
bool nvmf_ctrlr_use_zcopy(struct spdk_nvmf_request *req);

void nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
				 bool dif_insert_or_strip);
void nvmf_bdev_ctrlr_identify_iocs_nvm(struct spdk_nvmf_ns *ns,
				       struct spdk_nvme_nvm_ns_data *nsdata_nvm);
int nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req);
int nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_copy_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
bool nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev_desc *desc, struct spdk_nvme_cmd *cmd,
				 struct spdk_dif_ctx *dif_ctx);
bool nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev);

int nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
			     struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
				 struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem,
		bool stop);
struct spdk_nvmf_ctrlr *nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		uint16_t cntlid);
bool nvmf_subsystem_host_auth_required(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn);
enum nvmf_auth_key_type {
	NVMF_AUTH_KEY_HOST,
	NVMF_AUTH_KEY_CTRLR,
};
struct spdk_key *nvmf_subsystem_get_dhchap_key(struct spdk_nvmf_subsystem *subsys, const char *nqn,
		enum nvmf_auth_key_type type);
struct spdk_nvmf_subsystem_listener *nvmf_subsystem_find_listener(
	struct spdk_nvmf_subsystem *subsystem,
	const struct spdk_nvme_transport_id *trid);
bool nvmf_subsystem_zone_append_supported(struct spdk_nvmf_subsystem *subsystem);
struct spdk_nvmf_listener *nvmf_transport_find_listener(
	struct spdk_nvmf_transport *transport,
	const struct spdk_nvme_transport_id *trid);
void nvmf_transport_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w,
			      bool named);
void nvmf_transport_listen_dump_trid(const struct spdk_nvme_transport_id *trid,
				     struct spdk_json_write_ctx *w);

int nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_async_event_discovery_log_change_notice(void *ctx);
void nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr);

void nvmf_ns_reservation_request(void *ctx);
void nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
				       struct spdk_nvmf_ns *ns,
				       enum spdk_nvme_reservation_notification_log_page_type type);

bool nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns);

static inline struct spdk_nvmf_host *
nvmf_ns_find_host(struct spdk_nvmf_ns *ns, const char *hostnqn)
{
	struct spdk_nvmf_host *host = NULL;

	TAILQ_FOREACH(host, &ns->hosts, link) {
		if (strcmp(hostnqn, host->nqn) == 0) {
			return host;
		}
	}

	return NULL;
}

/*
 * Abort zero-copy requests that have already received a buffer (i.e. the zcopy_start callback
 * has fired) but haven't started zcopy_end.  These requests sit on the outstanding queue without
 * waiting for a completion from the bdev layer, so when a qpair is being disconnected we need to
 * kick them to force their completion.
 */
void nvmf_qpair_abort_pending_zcopy_reqs(struct spdk_nvmf_qpair *qpair);

/*
 * Free AER simply frees the RDMA resources for the AER without informing the host.
 * This function should be called when deleting a qpair to make sure the qpair is completely
 * empty before freeing the request.  The reason we free the AER without sending a completion
 * is to prevent the host from sending another AER.
 */
void nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair);

int nvmf_ctrlr_abort_request(struct spdk_nvmf_request *req);

void nvmf_ctrlr_set_fatal_status(struct spdk_nvmf_ctrlr *ctrlr);

static inline bool
nvmf_ctrlr_ns_is_visible(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
{
	assert(nsid > 0 && nsid <= ctrlr->subsys->max_nsid);
	return spdk_bit_array_get(ctrlr->visible_ns, nsid - 1);
}

static inline void
nvmf_ctrlr_ns_set_visible(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid, bool visible)
{
	assert(nsid > 0 && nsid <= ctrlr->subsys->max_nsid);
	if (visible) {
		spdk_bit_array_set(ctrlr->visible_ns, nsid - 1);
	} else {
		spdk_bit_array_clear(ctrlr->visible_ns, nsid - 1);
	}
}

static inline struct spdk_nvmf_ns *
_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
	if (spdk_unlikely(nsid - 1 >= subsystem->max_nsid)) {
		return NULL;
	}

	return subsystem->ns[nsid - 1];
}

static inline struct spdk_nvmf_ns *
nvmf_ctrlr_get_ns(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
{
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvmf_ns *ns = _nvmf_subsystem_get_ns(subsystem, nsid);

	return ns && nvmf_ctrlr_ns_is_visible(ctrlr, nsid) ? ns : NULL;
}

static inline bool
nvmf_qpair_is_admin_queue(struct spdk_nvmf_qpair *qpair)
{
	return qpair->qid == 0;
}

void nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair, enum spdk_nvmf_qpair_state state);

int nvmf_qpair_auth_init(struct spdk_nvmf_qpair *qpair);
void nvmf_qpair_auth_destroy(struct spdk_nvmf_qpair *qpair);
void nvmf_qpair_auth_dump(struct spdk_nvmf_qpair *qpair, struct spdk_json_write_ctx *w);

int nvmf_auth_request_exec(struct spdk_nvmf_request *req);
bool nvmf_auth_is_supported(void);

static inline bool
nvmf_request_is_fabric_connect(struct spdk_nvmf_request *req)
{
	return req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC &&
	       req->cmd->nvmf_cmd.fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT;
}

/*
 * Tests whether a given string represents a valid NQN.
 */
bool nvmf_nqn_is_valid(const char *nqn);

/*
 * Tests whether a given NQN describes a discovery subsystem.
 */
bool nvmf_nqn_is_discovery(const char *nqn);

/**
 * Initiates a zcopy start operation
 *
 * \param bdev The \ref spdk_bdev
 * \param desc The \ref spdk_bdev_desc
 * \param ch The \ref spdk_io_channel
 * \param req The \ref spdk_nvmf_request passed to the bdev for processing
 *
 * \return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE if the command was completed immediately or
 *         SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS if the command was submitted and will be
 *         completed asynchronously.  Asynchronous completions are notified through
 *         spdk_nvmf_request_complete().
 */
int nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
				struct spdk_bdev_desc *desc,
				struct spdk_io_channel *ch,
				struct spdk_nvmf_request *req);
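
/*
 * Sketch of the expected call pattern (illustrative; "ns" and "ch" are assumed to
 * have been resolved from the request beforehand):
 *
 *	rc = nvmf_bdev_ctrlr_zcopy_start(ns->bdev, ns->desc, ch, req);
 *	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
 *		spdk_nvmf_request_complete(req);
 *	}
 *	... otherwise the completion arrives asynchronously ...
 */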

/**
 * Ends a zcopy operation
 *
 * \param req The NVMe-oF request
 * \param commit Flag indicating whether the buffers should be committed
 */
void nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit);

/**
 * Publishes the mDNS PRR (Pull Registration Request) for the NVMe-oF target.
 *
 * \param tgt The NVMe-oF target
 *
 * \return 0 on success, negative errno on failure
 */
int nvmf_publish_mdns_prr(struct spdk_nvmf_tgt *tgt);

/**
 * Stops the mDNS PRR (Pull Registration Request) for the NVMe-oF target.
 *
 * \param tgt The NVMe-oF target
 */
void nvmf_tgt_stop_mdns_prr(struct spdk_nvmf_tgt *tgt);

/**
 * Updates the listener list in the mDNS PRR (Pull Registration Request) for the NVMe-oF target.
 *
 * \param tgt The NVMe-oF target
 *
 * \return 0 on success, negative errno on failure
 */
int nvmf_tgt_update_mdns_prr(struct spdk_nvmf_tgt *tgt);

static inline struct spdk_nvmf_transport_poll_group *
nvmf_get_transport_poll_group(struct spdk_nvmf_poll_group *group,
			      struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_transport_poll_group *tgroup;

	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (tgroup->transport == transport) {
			return tgroup;
		}
	}

	return NULL;
}

/**
 * Generates a new NVMF controller id
 *
 * \param subsystem The subsystem
 *
 * \return unique controller id or 0xFFFF when all controller ids are in use
 */
uint16_t nvmf_subsystem_gen_cntlid(struct spdk_nvmf_subsystem *subsystem);
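
/*
 * Sketch (illustrative): callers should treat the 0xFFFF return value as an
 * allocation failure, e.g.
 *
 *	cntlid = nvmf_subsystem_gen_cntlid(subsystem);
 *	if (cntlid == 0xFFFF) {
 *		... fail the CONNECT command: no free controller ids ...
 *	}
 */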

#endif /* __NVMF_INTERNAL_H__ */