/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __NVMF_INTERNAL_H__
#define __NVMF_INTERNAL_H__

#include "spdk/stdinc.h"

#include "spdk/likely.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/nvmf_transport.h"
#include "spdk/nvmf_spec.h"
#include "spdk/assert.h"
#include "spdk/bdev.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/thread.h"

#define NVMF_MAX_ASYNC_EVENTS	(4)

/* The spec reserves cntlid values in the range FFF0h to FFFFh. */
#define NVMF_MIN_CNTLID 1
#define NVMF_MAX_CNTLID 0xFFEF

enum spdk_nvmf_subsystem_state {
	SPDK_NVMF_SUBSYSTEM_INACTIVE = 0,
	SPDK_NVMF_SUBSYSTEM_ACTIVATING,
	SPDK_NVMF_SUBSYSTEM_ACTIVE,
	SPDK_NVMF_SUBSYSTEM_PAUSING,
	SPDK_NVMF_SUBSYSTEM_PAUSED,
	SPDK_NVMF_SUBSYSTEM_RESUMING,
	SPDK_NVMF_SUBSYSTEM_DEACTIVATING,
	SPDK_NVMF_SUBSYSTEM_NUM_STATES,
};

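/*
 * Illustrative lifecycle for the states above (a sketch inferred from the
 * state names and the start/stop/pause/resume APIs, not an exhaustive
 * transition table):
 *
 *   INACTIVE -> ACTIVATING -> ACTIVE -> PAUSING -> PAUSED -> RESUMING -> ACTIVE
 *   ACTIVE or PAUSED -> DEACTIVATING -> INACTIVE
 */
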
struct spdk_nvmf_tgt {
	char					name[NVMF_TGT_NAME_MAX_LENGTH];

	pthread_mutex_t				mutex;

	uint64_t				discovery_genctr;

	uint32_t				max_subsystems;

	enum spdk_nvmf_tgt_discovery_filter	discovery_filter;

	/* Array of subsystem pointers of size max_subsystems indexed by sid */
	struct spdk_nvmf_subsystem		**subsystems;

	TAILQ_HEAD(, spdk_nvmf_transport)	transports;
	TAILQ_HEAD(, spdk_nvmf_poll_group)	poll_groups;

	/* Used for round-robin assignment of connections to poll groups */
	struct spdk_nvmf_poll_group		*next_poll_group;

	spdk_nvmf_tgt_destroy_done_fn		*destroy_cb_fn;
	void					*destroy_cb_arg;

	uint16_t				crdt[3];

	TAILQ_ENTRY(spdk_nvmf_tgt)		link;
};

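/*
 * Illustrative use of next_poll_group for round-robin connection placement
 * (a sketch of the pattern, not the exact implementation):
 *
 *	struct spdk_nvmf_poll_group *pg = tgt->next_poll_group;
 *
 *	tgt->next_poll_group = TAILQ_NEXT(pg, link);
 *	if (tgt->next_poll_group == NULL) {
 *		tgt->next_poll_group = TAILQ_FIRST(&tgt->poll_groups);
 *	}
 */
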
struct spdk_nvmf_host {
	char				nqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	TAILQ_ENTRY(spdk_nvmf_host)	link;
};

struct spdk_nvmf_subsystem_listener {
	struct spdk_nvmf_subsystem			*subsystem;
	spdk_nvmf_tgt_subsystem_listen_done_fn		cb_fn;
	void						*cb_arg;
	struct spdk_nvme_transport_id			*trid;
	struct spdk_nvmf_transport			*transport;
	enum spdk_nvme_ana_state			*ana_state;
	uint64_t					ana_state_change_count;
	uint16_t					id;
	TAILQ_ENTRY(spdk_nvmf_subsystem_listener)	link;
};

/* Maximum number of registrants supported per namespace */
#define SPDK_NVMF_MAX_NUM_REGISTRANTS		16

struct spdk_nvmf_registrant_info {
	uint64_t		rkey;
	char			host_uuid[SPDK_UUID_STRING_LEN];
};

struct spdk_nvmf_reservation_info {
	bool					ptpl_activated;
	enum spdk_nvme_reservation_type		rtype;
	uint64_t				crkey;
	char					bdev_uuid[SPDK_UUID_STRING_LEN];
	char					holder_uuid[SPDK_UUID_STRING_LEN];
	uint32_t				num_regs;
	struct spdk_nvmf_registrant_info	registrants[SPDK_NVMF_MAX_NUM_REGISTRANTS];
};

struct spdk_nvmf_subsystem_pg_ns_info {
	struct spdk_io_channel		*channel;
	struct spdk_uuid		uuid;
	/* current reservation key; no reservation if the value is 0 */
	uint64_t			crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type	rtype;
	/* Host ID which holds the reservation */
	struct spdk_uuid		holder_id;
	/* Host IDs of the registrants for the namespace */
	struct spdk_uuid		reg_hostid[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint64_t			num_blocks;

	/* I/O outstanding to this namespace */
	uint64_t			io_outstanding;
	enum spdk_nvmf_subsystem_state	state;
};

typedef void (*spdk_nvmf_poll_group_mod_done)(void *cb_arg, int status);

struct spdk_nvmf_subsystem_poll_group {
	/* Array of namespace information for each namespace indexed by nsid - 1 */
	struct spdk_nvmf_subsystem_pg_ns_info	*ns_info;
	uint32_t				num_ns;

	/* Number of ADMIN and FABRICS requests outstanding */
	uint64_t				mgmt_io_outstanding;
	spdk_nvmf_poll_group_mod_done		cb_fn;
	void					*cb_arg;

	enum spdk_nvmf_subsystem_state		state;

	TAILQ_HEAD(, spdk_nvmf_request)		queued;
};

struct spdk_nvmf_registrant {
	TAILQ_ENTRY(spdk_nvmf_registrant) link;
	struct spdk_uuid hostid;
	/* Registration key */
	uint64_t rkey;
};

struct spdk_nvmf_ns {
	uint32_t nsid;
	uint32_t anagrpid;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_nvmf_ns_opts opts;
	/* reservation notification mask */
	uint32_t mask;
	/* generation code */
	uint32_t gen;
	/* registrants head */
	TAILQ_HEAD(, spdk_nvmf_registrant) registrants;
	/* current reservation key */
	uint64_t crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* current reservation holder; only valid for reservation types that permit a single holder */
	struct spdk_nvmf_registrant *holder;
	/* Persist Through Power Loss file which contains the persistent reservation */
	char *ptpl_file;
	/* Persist Through Power Loss feature is enabled */
	bool ptpl_activated;
	/* ZCOPY supported on bdev device */
	bool zcopy;
};

struct spdk_nvmf_ctrlr_feat {
	union spdk_nvme_feat_arbitration arbitration;
	union spdk_nvme_feat_power_management power_management;
	union spdk_nvme_feat_error_recovery error_recovery;
	union spdk_nvme_feat_volatile_write_cache volatile_write_cache;
	union spdk_nvme_feat_number_of_queues number_of_queues;
	union spdk_nvme_feat_interrupt_coalescing interrupt_coalescing;
	union spdk_nvme_feat_interrupt_vector_configuration interrupt_vector_configuration;
	union spdk_nvme_feat_write_atomicity write_atomicity;
	union spdk_nvme_feat_async_event_configuration async_event_configuration;
	union spdk_nvme_feat_keep_alive_timer keep_alive_timer;
};

/*
 * NVMf reservation notification log page.
 */
struct spdk_nvmf_reservation_log {
	struct spdk_nvme_reservation_notification_log	log;
	TAILQ_ENTRY(spdk_nvmf_reservation_log)		link;
	struct spdk_nvmf_ctrlr				*ctrlr;
};

/*
 * NVMf async event completion.
 */
struct spdk_nvmf_async_event_completion {
	union spdk_nvme_async_event_completion		event;
	STAILQ_ENTRY(spdk_nvmf_async_event_completion)	link;
};

/*
 * This structure represents an NVMe-oF controller,
 * which is like a "session" in networking terms.
 */
struct spdk_nvmf_ctrlr {
	uint16_t			cntlid;
	char				hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	struct spdk_nvmf_subsystem	*subsys;

	struct spdk_nvmf_ctrlr_data	cdata;

	struct spdk_nvmf_registers	vcprop;

	struct spdk_nvmf_ctrlr_feat feat;

	struct spdk_nvmf_qpair	*admin_qpair;
	struct spdk_thread	*thread;
	struct spdk_bit_array	*qpair_mask;

	const struct spdk_nvmf_subsystem_listener	*listener;

	struct spdk_nvmf_request *aer_req[NVMF_MAX_ASYNC_EVENTS];
	STAILQ_HEAD(, spdk_nvmf_async_event_completion) async_events;
	uint64_t notice_aen_mask;
	uint8_t nr_aer_reqs;
	struct spdk_uuid hostid;

	uint32_t association_timeout; /* in milliseconds */
	uint16_t changed_ns_list_count;
	struct spdk_nvme_ns_list changed_ns_list;
	uint64_t log_page_count;
	uint8_t num_avail_log_pages;
	TAILQ_HEAD(log_page_head, spdk_nvmf_reservation_log) log_head;

	/* Time to trigger keep-alive: poller_time = now_tick + period */
	uint64_t			last_keep_alive_tick;
	struct spdk_poller		*keep_alive_poller;

	struct spdk_poller		*association_timer;

	struct spdk_poller		*cc_timer;
	uint64_t			cc_timeout_tsc;
	struct spdk_poller		*cc_timeout_timer;

	bool				dif_insert_or_strip;
	bool				in_destruct;
	bool				disconnect_in_progress;
	/* valid only when disconnect_in_progress is true */
	bool				disconnect_is_shn;
	bool				acre_enabled;
	bool				dynamic_ctrlr;

	TAILQ_ENTRY(spdk_nvmf_ctrlr)	link;
};

/* Maximum pending AERs that can be migrated */
#define NVMF_MIGR_MAX_PENDING_AERS 256

/* spdk_nvmf_ctrlr private migration data structure used to save/restore a controller */
struct nvmf_ctrlr_migr_data {
	uint32_t				opts_size;

	uint16_t				cntlid;
	uint8_t					reserved1[2];

	struct spdk_nvmf_ctrlr_feat		feat;
	uint32_t				reserved2[2];

	uint32_t				num_async_events;
	uint32_t				acre_enabled;
	uint64_t				notice_aen_mask;
	union spdk_nvme_async_event_completion	async_events[NVMF_MIGR_MAX_PENDING_AERS];

	/* New fields must be added before reserved3, shrinking it so the total size stays 0x1000 */
	uint8_t					reserved3[3000];
};
SPDK_STATIC_ASSERT(sizeof(struct nvmf_ctrlr_migr_data) == 0x1000, "Incorrect size");

#define NVMF_MAX_LISTENERS_PER_SUBSYSTEM	16

struct spdk_nvmf_subsystem {
	struct spdk_thread				*thread;

	uint32_t					id;

	enum spdk_nvmf_subsystem_state			state;
	enum spdk_nvmf_subtype				subtype;

	uint16_t					next_cntlid;
	struct {
		uint8_t					allow_any_host : 1;
		uint8_t					allow_any_listener : 1;
		uint8_t					ana_reporting : 1;
		uint8_t					reserved : 5;
	} flags;

	/* boolean for state change synchronization */
	bool						changing_state;

	bool						destroying;
	bool						async_destroy;

	struct spdk_nvmf_tgt				*tgt;

	/* Array of pointers to namespaces of size max_nsid indexed by nsid - 1 */
	struct spdk_nvmf_ns				**ns;
	uint32_t					max_nsid;

	uint16_t					min_cntlid;
	uint16_t					max_cntlid;

	TAILQ_HEAD(, spdk_nvmf_ctrlr)			ctrlrs;

	/* A mutex used to protect the hosts list and allow_any_host flag. Unlike the namespace
	 * array, this list is not used on the I/O path (it's needed for handling things like
	 * the CONNECT command), so use a mutex to protect it instead of requiring the subsystem
	 * state to be paused. This removes the requirement to pause the subsystem when hosts
	 * are added or removed dynamically. */
	pthread_mutex_t					mutex;
	TAILQ_HEAD(, spdk_nvmf_host)			hosts;
	TAILQ_HEAD(, spdk_nvmf_subsystem_listener)	listeners;
	struct spdk_bit_array				*used_listener_ids;

	TAILQ_ENTRY(spdk_nvmf_subsystem)		entries;

	nvmf_subsystem_destroy_cb			async_destroy_cb;
	void						*async_destroy_cb_arg;

	char						sn[SPDK_NVME_CTRLR_SN_LEN + 1];
	char						mn[SPDK_NVME_CTRLR_MN_LEN + 1];
	char						subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];

	/* Array of per-ANA-group namespace counts of size max_nsid, indexed by anagrpid - 1.
	 * Since there can never be more ANA groups than namespaces, reusing the namespace
	 * array size is sufficient.
	 */
	uint32_t					*ana_group;
};

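/*
 * Illustrative use of the mutex described above when walking the hosts list
 * off the I/O path (a sketch; assumes the caller holds a valid subsystem
 * pointer):
 *
 *	struct spdk_nvmf_host *host;
 *
 *	pthread_mutex_lock(&subsystem->mutex);
 *	TAILQ_FOREACH(host, &subsystem->hosts, link) {
 *		// e.g., compare host->nqn against a CONNECT command's HOSTNQN
 *	}
 *	pthread_mutex_unlock(&subsystem->mutex);
 */
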
int nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
				  struct spdk_nvmf_transport *transport);
int nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem);
int nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
				  struct spdk_nvmf_subsystem *subsystem,
				  spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem,
				     uint32_t nsid,
				     spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);

void nvmf_update_discovery_log(struct spdk_nvmf_tgt *tgt, const char *hostnqn);
void nvmf_get_discovery_log_page(struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
				 uint32_t iovcnt, uint64_t offset, uint32_t length,
				 struct spdk_nvme_transport_id *cmd_source_trid);

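/*
 * Illustrative call of nvmf_get_discovery_log_page (a sketch; buf, buf_len,
 * and trid are assumed to be provided by the caller):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = buf_len };
 *
 *	nvmf_get_discovery_log_page(tgt, hostnqn, &iov, 1, 0, buf_len, &trid);
 */
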
void nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req);
int nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req);
bool nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid);
bool nvmf_ctrlr_use_zcopy(struct spdk_nvmf_request *req);

void nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
				 bool dif_insert_or_strip);
int nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req);
int nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
bool nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
				 struct spdk_dif_ctx *dif_ctx);
bool nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev);

int nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
			     struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
				 struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem,
		bool stop);
struct spdk_nvmf_ctrlr *nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		uint16_t cntlid);
struct spdk_nvmf_subsystem_listener *nvmf_subsystem_find_listener(
	struct spdk_nvmf_subsystem *subsystem,
	const struct spdk_nvme_transport_id *trid);
struct spdk_nvmf_listener *nvmf_transport_find_listener(
	struct spdk_nvmf_transport *transport,
	const struct spdk_nvme_transport_id *trid);
void nvmf_transport_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w,
			      bool named);
void nvmf_transport_listen_dump_opts(struct spdk_nvmf_transport *transport,
				     const struct spdk_nvme_transport_id *trid, struct spdk_json_write_ctx *w);
void nvmf_subsystem_set_ana_state(struct spdk_nvmf_subsystem *subsystem,
				  const struct spdk_nvme_transport_id *trid,
				  enum spdk_nvme_ana_state ana_state, uint32_t anagrpid,
				  spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, void *cb_arg);
bool nvmf_subsystem_get_ana_reporting(struct spdk_nvmf_subsystem *subsystem);

/**
 * Sets the controller ID range for a subsystem.
 * Valid range is [1, 0xFFEF].
 *
 * May only be performed on subsystems in the INACTIVE state.
 *
 * \param subsystem Subsystem to modify.
 * \param min_cntlid Minimum controller ID.
 * \param max_cntlid Maximum controller ID.
 *
 * \return 0 on success, or negated errno value on failure.
 */
int nvmf_subsystem_set_cntlid_range(struct spdk_nvmf_subsystem *subsystem,
				    uint16_t min_cntlid, uint16_t max_cntlid);

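/*
 * Illustrative call (a sketch): restrict a subsystem to controller IDs
 * 1-100 before the subsystem is started.
 *
 *	assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE);
 *	rc = nvmf_subsystem_set_cntlid_range(subsystem, 1, 100);
 *	if (rc != 0) {
 *		// handle failure (negated errno)
 *	}
 */
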
int nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_async_event_discovery_log_change_notice(void *ctx);
void nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_async_event_error_event(struct spdk_nvmf_ctrlr *ctrlr,
				       union spdk_nvme_async_event_completion event);
void nvmf_ns_reservation_request(void *ctx);
void nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
				       struct spdk_nvmf_ns *ns,
				       enum spdk_nvme_reservation_notification_log_page_type type);

/*
 * An AER abort is performed on a per-controller basis and sends a completion for each
 * outstanding AER to the host. This function should be called when attempting to recover
 * in error paths where it is acceptable for the host to send a subsequent AER.
 */
void nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_save_aers(struct spdk_nvmf_ctrlr *ctrlr, uint16_t *aer_cids,
			 uint16_t max_aers);

int nvmf_ctrlr_save_migr_data(struct spdk_nvmf_ctrlr *ctrlr, struct nvmf_ctrlr_migr_data *data);
int nvmf_ctrlr_restore_migr_data(struct spdk_nvmf_ctrlr *ctrlr, struct nvmf_ctrlr_migr_data *data);

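/*
 * Illustrative save/restore flow for controller migration (a sketch; assumes
 * the source controller has been quiesced by the caller):
 *
 *	struct nvmf_ctrlr_migr_data data = {};
 *
 *	rc = nvmf_ctrlr_save_migr_data(src_ctrlr, &data);
 *	// ...transfer the 4 KiB blob to the destination...
 *	rc = nvmf_ctrlr_restore_migr_data(dst_ctrlr, &data);
 */
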
/*
 * Abort zero-copy requests that have already been assigned a buffer (i.e., the zcopy_start
 * callback has been received) but have not yet started zcopy_end.  These requests are kept
 * on the outstanding queue but are not waiting for a completion from the bdev layer, so,
 * when a qpair is being disconnected, we need to kick them to force their completion.
 */
void nvmf_qpair_abort_pending_zcopy_reqs(struct spdk_nvmf_qpair *qpair);

/*
 * Freeing an AER simply frees the RDMA resources for the AER without informing the host.
 * This function should be called when deleting a qpair to make sure the qpair is completely
 * empty before freeing the request. The reason the AER is freed without sending a completion
 * is to prevent the host from sending another AER.
 */
void nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair);

int nvmf_ctrlr_abort_request(struct spdk_nvmf_request *req);

static inline struct spdk_nvmf_ns *
_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
	if (spdk_unlikely(nsid - 1 >= subsystem->max_nsid)) {
		return NULL;
	}

	return subsystem->ns[nsid - 1];
}

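/*
 * Illustrative caller pattern (a sketch): translate a command's NSID into a
 * namespace pointer and reject invalid or inactive namespaces.
 *
 *	struct spdk_nvmf_ns *ns = _nvmf_subsystem_get_ns(subsystem, cmd->nsid);
 *
 *	if (ns == NULL || ns->bdev == NULL) {
 *		// invalid NSID or no bdev attached; fail the command
 *	}
 */
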
static inline bool
nvmf_qpair_is_admin_queue(struct spdk_nvmf_qpair *qpair)
{
	return qpair->qid == 0;
}

/**
 * Initiates a zcopy start operation
 *
 * \param bdev The \ref spdk_bdev
 * \param desc The \ref spdk_bdev_desc
 * \param ch The \ref spdk_io_channel
 * \param req The \ref spdk_nvmf_request passed to the bdev for processing
 *
 * \return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE if the command was completed immediately or
 *         SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS if the command was submitted and will be
 *         completed asynchronously.  Asynchronous completions are notified through
 *         spdk_nvmf_request_complete().
 */
int nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
				struct spdk_bdev_desc *desc,
				struct spdk_io_channel *ch,
				struct spdk_nvmf_request *req);

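/*
 * Illustrative call (a sketch based on the return values documented above):
 *
 *	rc = nvmf_bdev_ctrlr_zcopy_start(bdev, desc, ch, req);
 *	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
 *		// completed immediately; the response is already populated
 *	}
 *	// otherwise the completion arrives via spdk_nvmf_request_complete()
 */
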
/**
 * Ends a zcopy operation
 *
 * \param req The NVMe-oF request
 * \param commit Flag indicating whether the buffers should be committed
 */
void nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit);

#endif /* __NVMF_INTERNAL_H__ */