1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#ifndef __NVMF_INTERNAL_H__
#define __NVMF_INTERNAL_H__

#include "spdk/stdinc.h"

#include "spdk/likely.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_spec.h"
#include "spdk/assert.h"
#include "spdk/bdev.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/thread.h"

#define SPDK_NVMF_MAX_SGL_ENTRIES	16

/* The maximum number of buffers per request */
#define NVMF_REQ_MAX_BUFFERS	(SPDK_NVMF_MAX_SGL_ENTRIES * 2)

/* AIO backend requires block size aligned data buffers,
 * extra 4KiB aligned data buffer should work for most devices.
 */
#define SHIFT_4KB			12u
#define NVMF_DATA_BUFFER_ALIGNMENT	(1u << SHIFT_4KB)
/* Low bits set below the 4KiB alignment boundary; note the 1LL widens the
 * mask expression to 64 bits so it can be applied to 64-bit addresses.
 */
#define NVMF_DATA_BUFFER_MASK		(NVMF_DATA_BUFFER_ALIGNMENT - 1LL)

/* Lifecycle states of a subsystem.  Transitions are driven asynchronously by
 * the poll-group add/remove/pause/resume operations declared later in this
 * header.
 */
enum spdk_nvmf_subsystem_state {
	SPDK_NVMF_SUBSYSTEM_INACTIVE = 0,
	SPDK_NVMF_SUBSYSTEM_ACTIVATING,
	SPDK_NVMF_SUBSYSTEM_ACTIVE,
	SPDK_NVMF_SUBSYSTEM_PAUSING,
	SPDK_NVMF_SUBSYSTEM_PAUSED,
	SPDK_NVMF_SUBSYSTEM_RESUMING,
	SPDK_NVMF_SUBSYSTEM_DEACTIVATING,
};

/* Lifecycle states of a queue pair (admin or I/O). */
enum spdk_nvmf_qpair_state {
	SPDK_NVMF_QPAIR_UNINITIALIZED = 0,
	SPDK_NVMF_QPAIR_ACTIVE,
	SPDK_NVMF_QPAIR_DEACTIVATING,
	SPDK_NVMF_QPAIR_ERROR,
};

/* Completion callback for an asynchronous state change; status reports the
 * result of the transition.
 */
typedef void (*spdk_nvmf_state_change_done)(void *cb_arg, int status);

/* An NVMe-oF target: a named collection of subsystems exposed over a set of
 * transports.
 */
struct spdk_nvmf_tgt {
	char				name[NVMF_TGT_NAME_MAX_LENGTH];

	/* Bumped whenever the discovery information changes (used to build the
	 * discovery log page generation counter).
	 */
	uint64_t			discovery_genctr;

	uint32_t			max_subsystems;

	/* Array of subsystem pointers of size max_subsystems indexed by sid */
	struct spdk_nvmf_subsystem	**subsystems;

	TAILQ_HEAD(, spdk_nvmf_transport)	transports;

	/* Callback (and its argument) fired when target destruction completes. */
	spdk_nvmf_tgt_destroy_done_fn		*destroy_cb_fn;
	void					*destroy_cb_arg;

	TAILQ_ENTRY(spdk_nvmf_tgt)		link;
};

/* Entry in a subsystem's list of host NQNs (see spdk_nvmf_subsystem::hosts and
 * allow_any_host).
 */
struct spdk_nvmf_host {
	char				nqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	TAILQ_ENTRY(spdk_nvmf_host)	link;
};

/* A (transport, transport ID) pair on which a subsystem accepts connections. */
struct spdk_nvmf_listener {
	struct spdk_nvme_transport_id	trid;
	struct spdk_nvmf_transport	*transport;
	TAILQ_ENTRY(spdk_nvmf_listener)	link;
};

/* Link-only node for a data buffer held in a transport poll group's cache. */
struct spdk_nvmf_transport_pg_cache_buf {
	STAILQ_ENTRY(spdk_nvmf_transport_pg_cache_buf) link;
};

/* Per-poll-group state for a single transport, including a small cache of
 * data buffers so the shared buffer pool is not hit on every request.
 */
struct spdk_nvmf_transport_poll_group {
	struct spdk_nvmf_transport					*transport;
	/* Requests that are waiting to obtain a data buffer */
	STAILQ_HEAD(, spdk_nvmf_request)				pending_buf_queue;
	STAILQ_HEAD(, spdk_nvmf_transport_pg_cache_buf)			buf_cache;
	uint32_t							buf_cache_count;
	uint32_t							buf_cache_size;
	struct spdk_nvmf_poll_group					*group;
	TAILQ_ENTRY(spdk_nvmf_transport_poll_group)			link;
};

/* Maximum number of registrants supported per namespace */
#define SPDK_NVMF_MAX_NUM_REGISTRANTS		16

/* One registrant record as persisted for Persist Through Power Loss (PTPL). */
struct spdk_nvmf_registrant_info {
	uint64_t		rkey;
	char			host_uuid[SPDK_UUID_STRING_LEN];
};

/* Snapshot of a namespace's reservation state, persisted to the PTPL file
 * (see spdk_nvmf_ns::ptpl_file).
 */
struct spdk_nvmf_reservation_info {
	bool					ptpl_activated;
	enum spdk_nvme_reservation_type		rtype;
	uint64_t				crkey;
	char					bdev_uuid[SPDK_UUID_STRING_LEN];
	char					holder_uuid[SPDK_UUID_STRING_LEN];
	uint32_t				num_regs;
	struct spdk_nvmf_registrant_info	registrants[SPDK_NVMF_MAX_NUM_REGISTRANTS];
};

/* Per-poll-group, per-namespace state: the I/O channel used on this thread
 * plus a cached copy of the namespace's reservation state.
 */
struct spdk_nvmf_subsystem_pg_ns_info {
	struct spdk_io_channel		*channel;
	struct spdk_uuid		uuid;
	/* current reservation key, no reservation if the value is 0 */
	uint64_t			crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type	rtype;
	/* Host ID which holds the reservation */
	struct spdk_uuid		holder_id;
	/* Host ID for the registrants with the namespace */
	struct spdk_uuid		reg_hostid[SPDK_NVMF_MAX_NUM_REGISTRANTS];
};

/* Completion callback for asynchronous poll-group/subsystem modifications. */
typedef void (*spdk_nvmf_poll_group_mod_done)(void *cb_arg, int status);

/* Per-poll-group state for a single subsystem. */
struct spdk_nvmf_subsystem_poll_group {
	/* Array of namespace information for each namespace indexed by nsid - 1 */
	struct spdk_nvmf_subsystem_pg_ns_info	*ns_info;
	uint32_t				num_ns;

	/* Count of I/O submitted from this group that have not yet completed. */
	uint64_t				io_outstanding;
	spdk_nvmf_poll_group_mod_done		cb_fn;
	void					*cb_arg;

	enum spdk_nvmf_subsystem_state		state;

	/* Requests queued while the subsystem is paused on this group. */
	TAILQ_HEAD(, spdk_nvmf_request)		queued;
};

/* Per-thread poll group: owns the qpairs polled on one SPDK thread. */
struct spdk_nvmf_poll_group {
	struct spdk_thread				*thread;
	struct spdk_poller				*poller;

	TAILQ_HEAD(, spdk_nvmf_transport_poll_group)	tgroups;

	/* Array of poll groups indexed by subsystem id (sid) */
	struct spdk_nvmf_subsystem_poll_group		*sgroups;
	uint32_t					num_sgroups;

	/* All of the queue pairs that belong to this poll group */
	TAILQ_HEAD(, spdk_nvmf_qpair)			qpairs;

	/* Statistics */
	struct spdk_nvmf_poll_group_stat		stat;
};

/* Result of executing a request: completed inline, or completion will arrive
 * asynchronously.
 */
typedef enum _spdk_nvmf_request_exec_status {
	SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE,
	SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS,
} spdk_nvmf_request_exec_status;

/* Host-to-controller capsule command; all members alias the same 64-byte
 * submission queue entry.
 */
union nvmf_h2c_msg {
	struct spdk_nvmf_capsule_cmd		nvmf_cmd;
	struct spdk_nvme_cmd			nvme_cmd;
	struct spdk_nvmf_fabric_prop_set_cmd	prop_set_cmd;
	struct spdk_nvmf_fabric_prop_get_cmd	prop_get_cmd;
	struct spdk_nvmf_fabric_connect_cmd	connect_cmd;
};
SPDK_STATIC_ASSERT(sizeof(union nvmf_h2c_msg) == 64, "Incorrect size");

/* Controller-to-host response capsule; all members alias the same 16-byte
 * completion queue entry.
 */
union nvmf_c2h_msg {
	struct spdk_nvme_cpl			nvme_cpl;
	struct spdk_nvmf_fabric_prop_get_rsp	prop_get_rsp;
	struct spdk_nvmf_fabric_connect_rsp	connect_rsp;
};
SPDK_STATIC_ASSERT(sizeof(union nvmf_c2h_msg) == 16, "Incorrect size");

/* A single in-flight NVMe-oF request: the command, its response, and the data
 * buffers backing the transfer.
 */
struct spdk_nvmf_request {
	struct spdk_nvmf_qpair		*qpair;
	uint32_t			length;
	enum spdk_nvme_data_transfer	xfer;
	void				*data;
	union nvmf_h2c_msg		*cmd;
	union nvmf_c2h_msg		*rsp;
	void				*buffers[NVMF_REQ_MAX_BUFFERS];
	struct iovec			iov[NVMF_REQ_MAX_BUFFERS];
	uint32_t			iovcnt;
	/* True if buffers[] were taken from the transport buffer pool and must
	 * be returned to it on completion.
	 */
	bool				data_from_pool;
	/* Used to wait for a bdev I/O slot when the bdev queue is full. */
	struct spdk_bdev_io_wait_entry	bdev_io_wait;

	STAILQ_ENTRY(spdk_nvmf_request)	buf_link;
	TAILQ_ENTRY(spdk_nvmf_request)	link;
};

/* A host registered with a namespace for reservations. */
struct spdk_nvmf_registrant {
	TAILQ_ENTRY(spdk_nvmf_registrant) link;
	struct spdk_uuid hostid;
	/* Registration key */
	uint64_t rkey;
};

/* A namespace within a subsystem, backed by a bdev. */
struct spdk_nvmf_ns {
	uint32_t nsid;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_nvmf_ns_opts opts;
	/* reservation notification mask */
	uint32_t mask;
	/* generation code */
	uint32_t gen;
	/* registrants head */
	TAILQ_HEAD(, spdk_nvmf_registrant) registrants;
	/* current reservation key */
	uint64_t crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* current reservation holder, only valid if reservation type can only have one holder */
	struct spdk_nvmf_registrant *holder;
	/* Persist Through Power Loss file which contains the persistent reservation */
	char *ptpl_file;
	/* Persist Through Power Loss feature is enabled */
	bool ptpl_activated;
};

/* A queue pair (admin queue when qid == 0, otherwise an I/O queue). */
struct spdk_nvmf_qpair {
	enum spdk_nvmf_qpair_state		state;
	spdk_nvmf_state_change_done		state_cb;
	void					*state_cb_arg;

	struct spdk_nvmf_transport		*transport;
	struct spdk_nvmf_ctrlr			*ctrlr;
	struct spdk_nvmf_poll_group		*group;

	uint16_t				qid;
	/* Submission queue head as reported to the host, and the highest value
	 * it has reached (sq_head wraps at the queue depth).
	 */
	uint16_t				sq_head;
	uint16_t				sq_head_max;

	TAILQ_HEAD(, spdk_nvmf_request)		outstanding;
	TAILQ_ENTRY(spdk_nvmf_qpair)		link;
};

/* Current values of the host-settable controller features (Set Features). */
struct spdk_nvmf_ctrlr_feat {
	union spdk_nvme_feat_arbitration		arbitration;
	union spdk_nvme_feat_power_management		power_management;
	union spdk_nvme_feat_error_recovery		error_recovery;
	union spdk_nvme_feat_volatile_write_cache	volatile_write_cache;
	union spdk_nvme_feat_number_of_queues		number_of_queues;
	union spdk_nvme_feat_write_atomicity		write_atomicity;
	union spdk_nvme_feat_async_event_configuration	async_event_configuration;
	union spdk_nvme_feat_keep_alive_timer		keep_alive_timer;
};

/*
 * NVMf reservation notification log page.
 */
struct spdk_nvmf_reservation_log {
	struct spdk_nvme_reservation_notification_log	log;
	TAILQ_ENTRY(spdk_nvmf_reservation_log)		link;
	struct spdk_nvmf_ctrlr				*ctrlr;
};

/*
 * This structure represents an NVMe-oF controller,
 * which is like a "session" in networking terms.
 */
struct spdk_nvmf_ctrlr {
	uint16_t			cntlid;
	char				hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	struct spdk_nvmf_subsystem	*subsys;

	struct {
		union spdk_nvme_cap_register	cap;
		union spdk_nvme_vs_register	vs;
		union spdk_nvme_cc_register	cc;
		union spdk_nvme_csts_register	csts;
	} vcprop; /* virtual controller properties */

	struct spdk_nvmf_ctrlr_feat feat;

	struct spdk_nvmf_qpair	*admin_qpair;
	struct spdk_thread	*thread;
	/* Tracks which qids are in use on this controller. */
	struct spdk_bit_array	*qpair_mask;

	/* Pending Asynchronous Event Request, if the host has one outstanding. */
	struct spdk_nvmf_request *aer_req;
	union spdk_nvme_async_event_completion notice_event;
	union spdk_nvme_async_event_completion reservation_event;
	struct spdk_uuid hostid;

	uint16_t changed_ns_list_count;
	struct spdk_nvme_ns_list changed_ns_list;
	uint64_t log_page_count;
	uint8_t num_avail_log_pages;
	TAILQ_HEAD(log_page_head, spdk_nvmf_reservation_log) log_head;

	/* Time to trigger keep-alive--poller_time = now_tick + period */
	uint64_t last_keep_alive_tick;
	struct spdk_poller *keep_alive_poller;

	bool dif_insert_or_strip;

	TAILQ_ENTRY(spdk_nvmf_ctrlr) link;
};

/* An NVM subsystem: namespaces, allowed hosts, listeners, and the controllers
 * currently connected to it.
 */
struct spdk_nvmf_subsystem {
	struct spdk_thread		*thread;
	uint32_t			id;
	enum spdk_nvmf_subsystem_state	state;

	char subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	enum spdk_nvmf_subtype subtype;
	/* Next controller ID to hand out when a host connects. */
	uint16_t next_cntlid;
	bool allow_any_host;

	struct spdk_nvmf_tgt			*tgt;

	/* Serial number and model number reported in Identify Controller. */
	char sn[SPDK_NVME_CTRLR_SN_LEN + 1];
	char mn[SPDK_NVME_CTRLR_MN_LEN + 1];

	/* Array of pointers to namespaces of size max_nsid indexed by nsid - 1 */
	struct spdk_nvmf_ns			**ns;
	uint32_t				max_nsid;
	/* This is the maximum allowed nsid to a subsystem */
	uint32_t				max_allowed_nsid;

	TAILQ_HEAD(, spdk_nvmf_ctrlr)		ctrlrs;

	TAILQ_HEAD(, spdk_nvmf_host)		hosts;

	TAILQ_HEAD(, spdk_nvmf_listener)	listeners;

	TAILQ_ENTRY(spdk_nvmf_subsystem)	entries;
};


struct spdk_nvmf_transport *spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt,
		enum spdk_nvme_transport_type);

int spdk_nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
				       struct spdk_nvmf_transport *transport);
int spdk_nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem);
int spdk_nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
				       struct spdk_nvmf_subsystem *subsystem,
				       spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void spdk_nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void spdk_nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void spdk_nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void spdk_nvmf_request_exec(struct spdk_nvmf_request *req);
int spdk_nvmf_request_free(struct spdk_nvmf_request *req);
int spdk_nvmf_request_complete(struct spdk_nvmf_request *req);

/* Return num_buffers data buffers to the transport pool (or the poll group's
 * buffer cache).
 */
void spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
				    struct spdk_nvmf_transport_poll_group *group,
				    struct spdk_nvmf_transport *transport,
				    uint32_t num_buffers);
/* Acquire num_buffers data buffers for req; returns 0 on success. */
int spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
				  struct spdk_nvmf_transport_poll_group *group,
				  struct spdk_nvmf_transport *transport,
				  uint32_t num_buffers);

bool spdk_nvmf_request_get_dif_ctx(struct spdk_nvmf_request *req, struct spdk_dif_ctx *dif_ctx);

void spdk_nvmf_get_discovery_log_page(struct spdk_nvmf_tgt *tgt, const char *hostnqn,
				      struct iovec *iov,
				      uint32_t iovcnt, uint64_t offset, uint32_t length);

void spdk_nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr);
int spdk_nvmf_ctrlr_process_fabrics_cmd(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req);
bool spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool spdk_nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid);

void spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
				      bool dif_insert_or_strip);
int spdk_nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				  struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int spdk_nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				   struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int spdk_nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int spdk_nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				   struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int spdk_nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int spdk_nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
bool spdk_nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
				      struct spdk_dif_ctx *dif_ctx);

int spdk_nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
				  struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
				      struct spdk_nvmf_ctrlr *ctrlr);
struct spdk_nvmf_ctrlr *spdk_nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		uint16_t cntlid);
int spdk_nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_ns_reservation_request(void *ctx);
void spdk_nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
		struct spdk_nvmf_ns *ns,
		enum spdk_nvme_reservation_notification_log_page_type type);

/*
 * Abort aer is sent on a per controller basis and sends a completion for the aer to the host.
 * This function should be called when attempting to recover in error paths when it is OK for
 * the host to send a subsequent AER.
 */
void spdk_nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr);

/*
 * Free aer simply frees the rdma resources for the aer without informing the host.
 * This function should be called when deleting a qpair when one wants to make sure
 * the qpair is completely empty before freeing the request. The reason we free the
 * AER without sending a completion is to prevent the host from sending another AER.
 */
void spdk_nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair);

/* Look up a namespace by 1-based nsid; returns NULL for nsid 0, out-of-range
 * nsids, and inactive (NULL) slots.
 */
static inline struct spdk_nvmf_ns *
_spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
	if (spdk_unlikely(nsid - 1 >= subsystem->max_nsid)) {
		return NULL;
	}

	return subsystem->ns[nsid - 1];
}

/* True if qpair is the admin queue (qid 0 per the NVMe specification). */
static inline bool
spdk_nvmf_qpair_is_admin_queue(struct spdk_nvmf_qpair *qpair)
{
	return qpair->qid == 0;
}

#endif /* __NVMF_INTERNAL_H__ */