xref: /spdk/lib/nvmf/nvmf_internal.h (revision f93b6fb0a4ebcee203e7c44c9e170c20bbce96cc)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #ifndef __NVMF_INTERNAL_H__
35 #define __NVMF_INTERNAL_H__
36 
37 #include "spdk/stdinc.h"
38 
39 #include "spdk/likely.h"
40 #include "spdk/nvmf.h"
41 #include "spdk/nvmf_spec.h"
42 #include "spdk/assert.h"
43 #include "spdk/bdev.h"
44 #include "spdk/queue.h"
45 #include "spdk/util.h"
46 #include "spdk/thread.h"
47 
/* Maximum number of scatter-gather list entries a single request can carry */
#define SPDK_NVMF_MAX_SGL_ENTRIES	16

/* AIO backend requires block size aligned data buffers,
 * extra 4KiB aligned data buffer should work for most devices.
 */
#define SHIFT_4KB			12u
#define NVMF_DATA_BUFFER_ALIGNMENT	(1u << SHIFT_4KB)
/* Low-order bits that must be clear in a 4KiB-aligned buffer address */
#define NVMF_DATA_BUFFER_MASK		(NVMF_DATA_BUFFER_ALIGNMENT - 1LL)
/*
 * Lifecycle states of a subsystem. The *_ING values are transient states
 * held while an asynchronous state change is in progress.
 */
enum spdk_nvmf_subsystem_state {
	SPDK_NVMF_SUBSYSTEM_INACTIVE = 0,
	SPDK_NVMF_SUBSYSTEM_ACTIVATING,
	SPDK_NVMF_SUBSYSTEM_ACTIVE,
	SPDK_NVMF_SUBSYSTEM_PAUSING,
	SPDK_NVMF_SUBSYSTEM_PAUSED,
	SPDK_NVMF_SUBSYSTEM_RESUMING,
	SPDK_NVMF_SUBSYSTEM_DEACTIVATING,
};
66 
/* Lifecycle states of a queue pair (a single NVMe-oF connection). */
enum spdk_nvmf_qpair_state {
	SPDK_NVMF_QPAIR_UNINITIALIZED = 0,
	SPDK_NVMF_QPAIR_ACTIVE,
	SPDK_NVMF_QPAIR_DEACTIVATING,
	SPDK_NVMF_QPAIR_ERROR,
};
73 
74 typedef void (*spdk_nvmf_state_change_done)(void *cb_arg, int status);
75 
/* An NVMe-oF target: owns the subsystem array and the list of transports. */
struct spdk_nvmf_tgt {
	/* Generation counter for discovery data — presumably incremented when
	 * the discovery information changes; verify against callers. */
	uint64_t				discovery_genctr;

	uint32_t				max_subsystems;

	/* Array of subsystem pointers of size max_subsystems indexed by sid */
	struct spdk_nvmf_subsystem		**subsystems;

	/* Cached discovery log page and its size in bytes */
	struct spdk_nvmf_discovery_log_page	*discovery_log_page;
	size_t					discovery_log_page_size;
	TAILQ_HEAD(, spdk_nvmf_transport)	transports;

	/* Callback and argument invoked when target destruction completes */
	spdk_nvmf_tgt_destroy_done_fn		*destroy_cb_fn;
	void					*destroy_cb_arg;
};
91 
/* An allowed host NQN entry on a subsystem's hosts list. */
struct spdk_nvmf_host {
	char				*nqn;
	TAILQ_ENTRY(spdk_nvmf_host)	link;
};
96 
/* An address (transport ID) on which a subsystem accepts connections. */
struct spdk_nvmf_listener {
	struct spdk_nvme_transport_id	trid;		/* Address being listened on */
	struct spdk_nvmf_transport	*transport;	/* Transport servicing this listener */
	TAILQ_ENTRY(spdk_nvmf_listener)	link;
};
102 
/* A data buffer held in a transport poll group's per-group buffer cache. */
struct spdk_nvmf_transport_pg_cache_buf {
	STAILQ_ENTRY(spdk_nvmf_transport_pg_cache_buf) link;
};
106 
/* Per-poll-group state for a single transport, including a cache of data buffers. */
struct spdk_nvmf_transport_poll_group {
	struct spdk_nvmf_transport					*transport;
	/* Cached data buffers and current/maximum cache occupancy */
	STAILQ_HEAD(, spdk_nvmf_transport_pg_cache_buf)			buf_cache;
	uint32_t							buf_cache_count;
	uint32_t							buf_cache_size;
	TAILQ_ENTRY(spdk_nvmf_transport_poll_group)			link;
};
114 
/* Maximum number of registrants supported per namespace */
#define SPDK_NVMF_MAX_NUM_REGISTRANTS		16

/* Per-poll-group, per-namespace state, including a copy of the namespace's
 * reservation information. */
struct spdk_nvmf_subsystem_pg_ns_info {
	/* I/O channel used to submit I/O to this namespace's bdev */
	struct spdk_io_channel		*channel;
	/* current reservation key, no reservation if the value is 0 */
	uint64_t			crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type	rtype;
	/* Host ID which holds the reservation */
	struct spdk_uuid		holder_id;
	/* Host ID for the registrants with the namespace */
	struct spdk_uuid		reg_hostid[SPDK_NVMF_MAX_NUM_REGISTRANTS];
};
129 
/* Per-poll-group state for one subsystem. */
struct spdk_nvmf_subsystem_poll_group {
	/* Array of namespace information for each namespace indexed by nsid - 1 */
	struct spdk_nvmf_subsystem_pg_ns_info	*ns_info;
	uint32_t				num_ns;

	/* This poll group's local view of the subsystem's state */
	enum spdk_nvmf_subsystem_state		state;

	/* Requests held back for later execution — presumably while the
	 * subsystem is not active; verify against callers. */
	TAILQ_HEAD(, spdk_nvmf_request)		queued;
};
139 
/* A collection of transports and queue pairs polled together on one SPDK thread. */
struct spdk_nvmf_poll_group {
	struct spdk_thread				*thread;
	struct spdk_poller				*poller;

	/* Per-transport state for each transport added to this group */
	TAILQ_HEAD(, spdk_nvmf_transport_poll_group)	tgroups;

	/* Array of poll groups indexed by subsystem id (sid) */
	struct spdk_nvmf_subsystem_poll_group		*sgroups;
	uint32_t					num_sgroups;

	/* All of the queue pairs that belong to this poll group */
	TAILQ_HEAD(, spdk_nvmf_qpair)			qpairs;
};
153 
/* Outcome of executing a request: completed inline, or will complete later. */
typedef enum _spdk_nvmf_request_exec_status {
	SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE,
	SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS,
} spdk_nvmf_request_exec_status;
158 
/* Host-to-controller message: the 64-byte command capsule in all its forms
 * (size enforced by the static assert below). */
union nvmf_h2c_msg {
	struct spdk_nvmf_capsule_cmd			nvmf_cmd;
	struct spdk_nvme_cmd				nvme_cmd;
	struct spdk_nvmf_fabric_prop_set_cmd		prop_set_cmd;
	struct spdk_nvmf_fabric_prop_get_cmd		prop_get_cmd;
	struct spdk_nvmf_fabric_connect_cmd		connect_cmd;
};
SPDK_STATIC_ASSERT(sizeof(union nvmf_h2c_msg) == 64, "Incorrect size");
167 
/* Controller-to-host message: the 16-byte completion in all its forms
 * (size enforced by the static assert below). */
union nvmf_c2h_msg {
	struct spdk_nvme_cpl				nvme_cpl;
	struct spdk_nvmf_fabric_prop_get_rsp		prop_get_rsp;
	struct spdk_nvmf_fabric_connect_rsp		connect_rsp;
};
SPDK_STATIC_ASSERT(sizeof(union nvmf_c2h_msg) == 16, "Incorrect size");
174 
/* A single NVMe-oF request: command, response, and associated data buffers. */
struct spdk_nvmf_request {
	struct spdk_nvmf_qpair		*qpair;		/* Queue pair the request arrived on */
	uint32_t			length;		/* Data transfer length in bytes */
	enum spdk_nvme_data_transfer	xfer;		/* Data transfer direction */
	void				*data;		/* Data buffer for the transfer */
	union nvmf_h2c_msg		*cmd;		/* Command capsule from the host */
	union nvmf_c2h_msg		*rsp;		/* Completion to send back to the host */
	/* Scatter-gather list describing the data buffers */
	struct iovec			iov[SPDK_NVMF_MAX_SGL_ENTRIES * 2];
	uint32_t			iovcnt;
	/* Wait entry used to retry when the bdev cannot accept more I/O */
	struct spdk_bdev_io_wait_entry	bdev_io_wait;

	TAILQ_ENTRY(spdk_nvmf_request)	link;
};
188 
/* A host registered with a namespace for reservation purposes. */
struct spdk_nvmf_registrant {
	TAILQ_ENTRY(spdk_nvmf_registrant) link;
	/* Host identifier of the registrant */
	struct spdk_uuid hostid;
	/* Registration key */
	uint64_t rkey;
};
195 
/* A namespace within a subsystem, backed by a bdev, plus its reservation state. */
struct spdk_nvmf_ns {
	uint32_t nsid;
	struct spdk_nvmf_subsystem *subsystem;
	/* Backing bdev and the open descriptor used to access it */
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_nvmf_ns_opts opts;
	/* reservation notification mask */
	uint32_t mask;
	/* generation code */
	uint32_t gen;
	/* registrants head */
	TAILQ_HEAD(, spdk_nvmf_registrant) registrants;
	/* current reservation key */
	uint64_t crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* current reservation holder, only valid if reservation type can only have one holder */
	struct spdk_nvmf_registrant *holder;
};
215 
/* A queue pair: a single admin or I/O queue belonging to one controller. */
struct spdk_nvmf_qpair {
	enum spdk_nvmf_qpair_state		state;
	/* Callback and argument fired when an asynchronous state change completes */
	spdk_nvmf_state_change_done		state_cb;
	void					*state_cb_arg;

	struct spdk_nvmf_transport		*transport;
	struct spdk_nvmf_ctrlr			*ctrlr;
	struct spdk_nvmf_poll_group		*group;

	uint16_t				qid;		/* Queue ID; 0 is the admin queue */
	uint16_t				sq_head;	/* Current submission queue head */
	uint16_t				sq_head_max;	/* Maximum value sq_head can take */

	/* Requests currently being processed on this qpair */
	TAILQ_HEAD(, spdk_nvmf_request)		outstanding;
	TAILQ_ENTRY(spdk_nvmf_qpair)		link;
};
232 
/* Current values of the NVMe features maintained per virtual controller. */
struct spdk_nvmf_ctrlr_feat {
	union spdk_nvme_feat_arbitration arbitration;
	union spdk_nvme_feat_power_management power_management;
	union spdk_nvme_feat_error_recovery error_recovery;
	union spdk_nvme_feat_volatile_write_cache volatile_write_cache;
	union spdk_nvme_feat_number_of_queues number_of_queues;
	union spdk_nvme_feat_write_atomicity write_atomicity;
	union spdk_nvme_feat_async_event_configuration async_event_configuration;
	union spdk_nvme_feat_keep_alive_timer keep_alive_timer;
};
243 
/*
 * NVMf reservation notification log page.
 */
struct spdk_nvmf_reservation_log {
	/* Log page entry to report to the host */
	struct spdk_nvme_reservation_notification_log	log;
	TAILQ_ENTRY(spdk_nvmf_reservation_log)		link;
	/* Controller this log entry belongs to */
	struct spdk_nvmf_ctrlr				*ctrlr;
};
252 
/*
 * This structure represents an NVMe-oF controller,
 * which is like a "session" in networking terms.
 */
struct spdk_nvmf_ctrlr {
	uint16_t			cntlid;		/* Controller ID within the subsystem */
	struct spdk_nvmf_subsystem	*subsys;

	struct {
		union spdk_nvme_cap_register	cap;
		union spdk_nvme_vs_register	vs;
		union spdk_nvme_cc_register	cc;
		union spdk_nvme_csts_register	csts;
	} vcprop; /* virtual controller properties */

	/* Current values of this controller's NVMe features */
	struct spdk_nvmf_ctrlr_feat feat;

	struct spdk_nvmf_qpair	*admin_qpair;
	struct spdk_thread	*thread;
	/* Bit array of qpairs — presumably indexed by qid to track which queue
	 * IDs are in use; verify against callers. */
	struct spdk_bit_array	*qpair_mask;

	/* Outstanding asynchronous event request, if any, and the pending
	 * async event completions to deliver */
	struct spdk_nvmf_request *aer_req;
	union spdk_nvme_async_event_completion notice_event;
	union spdk_nvme_async_event_completion reservation_event;
	struct spdk_uuid  hostid;

	/* Namespaces recorded for the Changed Namespace List log page */
	uint16_t changed_ns_list_count;
	struct spdk_nvme_ns_list changed_ns_list;
	uint64_t log_page_count;
	/* Queued reservation notification log pages (see log_head) */
	uint8_t num_avail_log_pages;
	TAILQ_HEAD(log_page_head, spdk_nvmf_reservation_log) log_head;

	/* Time to trigger keep-alive--poller_time = now_tick + period */
	uint64_t last_keep_alive_tick;
	struct spdk_poller			*keep_alive_poller;

	TAILQ_ENTRY(spdk_nvmf_ctrlr)		link;
};
291 
/* An NVM subsystem: a named collection of namespaces, controllers, allowed
 * hosts, and listeners. */
struct spdk_nvmf_subsystem {
	struct spdk_thread		*thread;
	uint32_t			id;	/* Subsystem ID (sid) */
	enum spdk_nvmf_subsystem_state	state;

	char subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	enum spdk_nvmf_subtype subtype;
	/* Next candidate controller ID to hand out */
	uint16_t next_cntlid;
	/* If true, any host may connect regardless of the hosts list */
	bool allow_any_host;

	struct spdk_nvmf_tgt			*tgt;

	/* Serial number and model number strings */
	char sn[SPDK_NVME_CTRLR_SN_LEN + 1];
	char mn[SPDK_NVME_CTRLR_MN_LEN + 1];

	/* Array of pointers to namespaces of size max_nsid indexed by nsid - 1 */
	struct spdk_nvmf_ns			**ns;
	uint32_t				max_nsid;
	/* This is the maximum allowed nsid to a subsystem */
	uint32_t				max_allowed_nsid;

	TAILQ_HEAD(, spdk_nvmf_ctrlr)		ctrlrs;

	TAILQ_HEAD(, spdk_nvmf_host)		hosts;

	TAILQ_HEAD(, spdk_nvmf_listener)	listeners;

	TAILQ_ENTRY(spdk_nvmf_subsystem)	entries;
};
321 
/* Invoked when an asynchronous poll group modification finishes; status conveys the result. */
typedef void(*spdk_nvmf_poll_group_mod_done)(void *cb_arg, int status);

/* Find the transport of the given type registered with the target, if any. */
struct spdk_nvmf_transport *spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt,
		enum spdk_nvme_transport_type);

/*
 * Poll group management. The subsystem add/remove/pause/resume operations
 * report completion through the supplied spdk_nvmf_poll_group_mod_done callback.
 */
int spdk_nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
				       struct spdk_nvmf_transport *transport);
int spdk_nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem);
int spdk_nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
				       struct spdk_nvmf_subsystem *subsystem,
				       spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void spdk_nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void spdk_nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void spdk_nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);

/* Request lifecycle: execute, free, and post a completion for a request. */
void spdk_nvmf_request_exec(struct spdk_nvmf_request *req);
int spdk_nvmf_request_free(struct spdk_nvmf_request *req);
int spdk_nvmf_request_complete(struct spdk_nvmf_request *req);

/* Copy discovery log page data (length bytes starting at offset) into iov. */
void spdk_nvmf_get_discovery_log_page(struct spdk_nvmf_tgt *tgt, struct iovec *iov,
				      uint32_t iovcnt, uint64_t offset, uint32_t length);

/* Virtual controller command processing and capability queries. */
void spdk_nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr);
int spdk_nvmf_ctrlr_process_fabrics_cmd(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req);
bool spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool spdk_nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid);

/* Handlers that translate NVMe commands into bdev operations. */
void spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata);
int spdk_nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				  struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int spdk_nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				   struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int spdk_nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int spdk_nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				   struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int spdk_nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int spdk_nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req);

/* Subsystem <-> controller association and event/reservation notification. */
int spdk_nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
				  struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
				      struct spdk_nvmf_ctrlr *ctrlr);
struct spdk_nvmf_ctrlr *spdk_nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		uint16_t cntlid);
int spdk_nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_ns_reservation_request(void *ctx);
void spdk_nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
		struct spdk_nvmf_ns *ns,
		enum spdk_nvme_reservation_notification_log_page_type type);

/*
 * Abort aer is sent on a per controller basis and sends a completion for the aer to the host.
 * This function should be called when attempting to recover in error paths when it is OK for
 * the host to send a subsequent AER.
 */
void spdk_nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr);

/*
 * Free aer simply frees the rdma resources for the aer without informing the host.
 * This function should be called when deleting a qpair when one wants to make sure
 * the qpair is completely empty before freeing the request. The reason we free the
 * AER without sending a completion is to prevent the host from sending another AER.
 */
void spdk_nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair);
396 
397 static inline struct spdk_nvmf_ns *
398 _spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
399 {
400 	/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
401 	if (spdk_unlikely(nsid - 1 >= subsystem->max_nsid)) {
402 		return NULL;
403 	}
404 
405 	return subsystem->ns[nsid - 1];
406 }
407 
408 static inline bool
409 spdk_nvmf_qpair_is_admin_queue(struct spdk_nvmf_qpair *qpair)
410 {
411 	return qpair->qid == 0;
412 }
413 
414 #endif /* __NVMF_INTERNAL_H__ */
415