/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef __NVMF_INTERNAL_H__
#define __NVMF_INTERNAL_H__

#include "spdk/stdinc.h"

#include "spdk/keyring.h"
#include "spdk/likely.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/nvmf_transport.h"
#include "spdk/nvmf_spec.h"
#include "spdk/assert.h"
#include "spdk/bdev.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/thread.h"
#include "spdk/tree.h"
#include "spdk/bit_array.h"

/* The spec reserves cntlid values in the range FFF0h to FFFFh. */
#define NVMF_MIN_CNTLID 1
#define NVMF_MAX_CNTLID 0xFFEF

enum spdk_nvmf_tgt_state {
	NVMF_TGT_IDLE = 0,
	NVMF_TGT_RUNNING,
	NVMF_TGT_PAUSING,
	NVMF_TGT_PAUSED,
	NVMF_TGT_RESUMING,
};

enum spdk_nvmf_subsystem_state {
	SPDK_NVMF_SUBSYSTEM_INACTIVE = 0,
	SPDK_NVMF_SUBSYSTEM_ACTIVATING,
	SPDK_NVMF_SUBSYSTEM_ACTIVE,
	SPDK_NVMF_SUBSYSTEM_PAUSING,
	SPDK_NVMF_SUBSYSTEM_PAUSED,
	SPDK_NVMF_SUBSYSTEM_RESUMING,
	SPDK_NVMF_SUBSYSTEM_DEACTIVATING,
	SPDK_NVMF_SUBSYSTEM_NUM_STATES,
};

RB_HEAD(subsystem_tree, spdk_nvmf_subsystem);

struct spdk_nvmf_tgt {
	char name[NVMF_TGT_NAME_MAX_LENGTH];

	pthread_mutex_t mutex;

	uint64_t discovery_genctr;

	uint32_t max_subsystems;

	enum spdk_nvmf_tgt_discovery_filter discovery_filter;

	enum spdk_nvmf_tgt_state state;

	struct spdk_bit_array *subsystem_ids;

	struct subsystem_tree subsystems;

	TAILQ_HEAD(, spdk_nvmf_transport) transports;
	TAILQ_HEAD(, spdk_nvmf_poll_group) poll_groups;
	TAILQ_HEAD(, spdk_nvmf_referral) referrals;

	/* Used for round-robin assignment of connections to poll groups */
	struct spdk_nvmf_poll_group *next_poll_group;

	spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn;
	void *destroy_cb_arg;

	uint16_t crdt[3];
	uint16_t num_poll_groups;

	TAILQ_ENTRY(spdk_nvmf_tgt) link;
};

struct spdk_nvmf_host {
	char nqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	struct spdk_key *dhchap_key;
	TAILQ_ENTRY(spdk_nvmf_host) link;
};

struct spdk_nvmf_subsystem_listener {
	struct spdk_nvmf_subsystem *subsystem;
	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
	void *cb_arg;
	struct spdk_nvme_transport_id *trid;
	struct spdk_nvmf_transport *transport;
	enum spdk_nvme_ana_state *ana_state;
	uint64_t ana_state_change_count;
	uint16_t id;
	struct spdk_nvmf_listener_opts opts;
	TAILQ_ENTRY(spdk_nvmf_subsystem_listener) link;
};

struct spdk_nvmf_referral {
	/* Discovery Log Page Entry for this referral */
	struct spdk_nvmf_discovery_log_page_entry entry;
	/* Transport ID */
	struct spdk_nvme_transport_id trid;
	TAILQ_ENTRY(spdk_nvmf_referral) link;
};

struct spdk_nvmf_subsystem_pg_ns_info {
	struct spdk_io_channel *channel;
	struct spdk_uuid uuid;
	/* Current reservation key; no reservation if the value is 0 */
	uint64_t crkey;
	/* Reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* Host ID that holds the reservation */
	struct spdk_uuid holder_id;
	/* Host IDs of the registrants of the namespace */
	struct spdk_uuid reg_hostid[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint64_t num_blocks;

	/* I/O outstanding to this namespace */
	uint64_t io_outstanding;
	enum spdk_nvmf_subsystem_state state;
};
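/*
 * Illustrative sketch only: a hypothetical helper (not part of the internal
 * API) showing how the per-poll-group namespace entry above can be scanned to
 * tell whether a given host ID is currently registered. It assumes unused
 * reg_hostid slots are all-zero UUIDs, so a real host ID never matches them.
 */
static inline bool
nvmf_example_pg_ns_host_is_registrant(const struct spdk_nvmf_subsystem_pg_ns_info *ns_info,
				      const struct spdk_uuid *hostid)
{
	uint32_t i;

	for (i = 0; i < SPDK_NVMF_MAX_NUM_REGISTRANTS; i++) {
		if (spdk_uuid_compare(&ns_info->reg_hostid[i], hostid) == 0) {
			return true;
		}
	}

	return false;
}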
typedef void (*spdk_nvmf_poll_group_mod_done)(void *cb_arg, int status);

struct spdk_nvmf_subsystem_poll_group {
	/* Array of namespace information for each namespace indexed by nsid - 1 */
	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
	uint32_t num_ns;
	enum spdk_nvmf_subsystem_state state;

	/* Number of ADMIN and FABRICS requests outstanding */
	uint64_t mgmt_io_outstanding;
	spdk_nvmf_poll_group_mod_done cb_fn;
	void *cb_arg;

	TAILQ_HEAD(, spdk_nvmf_request) queued;
};

struct spdk_nvmf_registrant {
	TAILQ_ENTRY(spdk_nvmf_registrant) link;
	struct spdk_uuid hostid;
	/* Registration key */
	uint64_t rkey;
};

struct spdk_nvmf_ns {
	uint32_t nsid;
	uint32_t anagrpid;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_nvmf_ns_opts opts;
	/* Reservation notification mask */
	uint32_t mask;
	/* Generation code */
	uint32_t gen;
	/* Registrants head */
	TAILQ_HEAD(, spdk_nvmf_registrant) registrants;
	/* Current reservation key */
	uint64_t crkey;
	/* Reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* Current reservation holder; only valid if the reservation type allows a single holder */
	struct spdk_nvmf_registrant *holder;
	/* Persist Through Power Loss file which contains the persistent reservation */
	char *ptpl_file;
	/* Persist Through Power Loss feature is enabled */
	bool ptpl_activated;
	/* ZCOPY supported on bdev device */
	bool zcopy;
	/* Command Set Identifier */
	enum spdk_nvme_csi csi;
	/* Make namespace visible to controllers of these hosts */
	TAILQ_HEAD(, spdk_nvmf_host) hosts;
	/* Namespace is always visible to all controllers */
	bool always_visible;
};

/*
 * NVMf reservation notification log page.
 */
struct spdk_nvmf_reservation_log {
	struct spdk_nvme_reservation_notification_log log;
	TAILQ_ENTRY(spdk_nvmf_reservation_log) link;
	struct spdk_nvmf_ctrlr *ctrlr;
};

/*
 * NVMf async event completion.
 */
struct spdk_nvmf_async_event_completion {
	union spdk_nvme_async_event_completion event;
	STAILQ_ENTRY(spdk_nvmf_async_event_completion) link;
};
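/*
 * Illustrative sketch only: a hypothetical helper (not part of the internal
 * API) showing how the registrants list in struct spdk_nvmf_ns above is
 * typically walked to look up the registrant entry for a given host ID.
 */
static inline struct spdk_nvmf_registrant *
nvmf_example_ns_find_registrant(struct spdk_nvmf_ns *ns, const struct spdk_uuid *hostid)
{
	struct spdk_nvmf_registrant *reg;

	TAILQ_FOREACH(reg, &ns->registrants, link) {
		if (spdk_uuid_compare(&reg->hostid, hostid) == 0) {
			return reg;
		}
	}

	return NULL;
}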
/*
 * This structure represents an NVMe-oF controller,
 * which is like a "session" in networking terms.
 */
struct spdk_nvmf_ctrlr {
	uint16_t cntlid;
	char hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	struct spdk_nvmf_subsystem *subsys;
	struct spdk_bit_array *visible_ns;

	struct spdk_nvmf_ctrlr_data cdata;

	struct spdk_nvmf_registers vcprop;

	struct spdk_nvmf_ctrlr_feat feat;

	struct spdk_nvmf_qpair *admin_qpair;
	struct spdk_thread *thread;
	struct spdk_bit_array *qpair_mask;

	const struct spdk_nvmf_subsystem_listener *listener;

	struct spdk_nvmf_request *aer_req[SPDK_NVMF_MAX_ASYNC_EVENTS];
	STAILQ_HEAD(, spdk_nvmf_async_event_completion) async_events;
	uint64_t notice_aen_mask;
	uint8_t nr_aer_reqs;
	struct spdk_uuid hostid;

	uint32_t association_timeout; /* in milliseconds */
	uint16_t changed_ns_list_count;
	struct spdk_nvme_ns_list changed_ns_list;
	uint64_t log_page_count;
	uint8_t num_avail_log_pages;
	TAILQ_HEAD(log_page_head, spdk_nvmf_reservation_log) log_head;

	/* Time to trigger keep-alive: poller_time = now_tick + period */
	uint64_t last_keep_alive_tick;
	struct spdk_poller *keep_alive_poller;

	struct spdk_poller *association_timer;

	struct spdk_poller *cc_timer;
	uint64_t cc_timeout_tsc;
	struct spdk_poller *cc_timeout_timer;

	bool dif_insert_or_strip;
	bool in_destruct;
	bool disconnect_in_progress;
	/* Valid only when disconnect_in_progress is true */
	bool disconnect_is_shn;
	bool acre_enabled;
	bool dynamic_ctrlr;

	TAILQ_ENTRY(spdk_nvmf_ctrlr) link;
};

#define NVMF_MAX_LISTENERS_PER_SUBSYSTEM 16

struct nvmf_subsystem_state_change_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	uint16_t nsid;

	enum spdk_nvmf_subsystem_state original_state;
	enum spdk_nvmf_subsystem_state requested_state;
	int status;
	struct spdk_thread *thread;

	spdk_nvmf_subsystem_state_change_done cb_fn;
	void *cb_arg;
	TAILQ_ENTRY(nvmf_subsystem_state_change_ctx) link;
};

struct spdk_nvmf_subsystem {
	struct spdk_thread *thread;

	uint32_t id;

	enum spdk_nvmf_subsystem_state state;
	enum spdk_nvmf_subtype subtype;

	uint16_t next_cntlid;
	struct {
		uint8_t allow_any_host : 1;
		uint8_t allow_any_listener : 1;
		uint8_t ana_reporting : 1;
		uint8_t reserved : 5;
	} flags;

	bool destroying;
	bool async_destroy;

	/* FDP related fields */
	bool fdp_supported;

	/* Zoned storage related fields */
	bool zone_append_supported;
	uint64_t max_zone_append_size_kib;

	struct spdk_nvmf_tgt *tgt;
	RB_ENTRY(spdk_nvmf_subsystem) link;

	/* Array of pointers to namespaces of size max_nsid indexed by nsid - 1 */
	struct spdk_nvmf_ns **ns;
	uint32_t max_nsid;

	uint16_t min_cntlid;
	uint16_t max_cntlid;

	uint64_t max_discard_size_kib;
	uint64_t max_write_zeroes_size_kib;

	TAILQ_HEAD(, spdk_nvmf_ctrlr) ctrlrs;

	/* A mutex used to protect the hosts list and allow_any_host flag. Unlike the namespace
	 * array, this list is not used on the I/O path (it's needed for handling things like
	 * the CONNECT command), so use a mutex to protect it instead of requiring the subsystem
	 * state to be paused. This removes the requirement to pause the subsystem when hosts
	 * are added or removed dynamically.
	 */
	pthread_mutex_t mutex;
	TAILQ_HEAD(, spdk_nvmf_host) hosts;
	TAILQ_HEAD(, spdk_nvmf_subsystem_listener) listeners;
	struct spdk_bit_array *used_listener_ids;

	TAILQ_ENTRY(spdk_nvmf_subsystem) entries;

	nvmf_subsystem_destroy_cb async_destroy_cb;
	void *async_destroy_cb_arg;

	char sn[SPDK_NVME_CTRLR_SN_LEN + 1];
	char mn[SPDK_NVME_CTRLR_MN_LEN + 1];
	char subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];

	/* Array of namespace counts per ANA group, of size max_nsid, indexed by anagrpid - 1.
	 * Sizing it the same as the namespace array is sufficient for ANA groups.
	 */
	uint32_t *ana_group;
	/* Queue of state change requests */
	TAILQ_HEAD(, nvmf_subsystem_state_change_ctx) state_changes;
};

static int
subsystem_cmp(struct spdk_nvmf_subsystem *subsystem1, struct spdk_nvmf_subsystem *subsystem2)
{
	return strncmp(subsystem1->subnqn, subsystem2->subnqn, sizeof(subsystem1->subnqn));
}

RB_GENERATE_STATIC(subsystem_tree, spdk_nvmf_subsystem, link, subsystem_cmp);
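/*
 * Illustrative sketch only: a hypothetical helper showing how the tree
 * generated above can be queried by NQN. A stack-allocated key carrying just
 * the subnqn is enough, since subsystem_cmp() compares only that field; this
 * mirrors what spdk_nvmf_tgt_find_subsystem() does.
 */
static inline struct spdk_nvmf_subsystem *
nvmf_example_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
{
	struct spdk_nvmf_subsystem find = {};

	snprintf(find.subnqn, sizeof(find.subnqn), "%s", subnqn);

	return RB_FIND(subsystem_tree, &tgt->subsystems, &find);
}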
int nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem);
int nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
				  struct spdk_nvmf_subsystem *subsystem,
				  spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem,
				      spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem,
				     uint32_t nsid,
				     spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem,
				      spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);

void nvmf_update_discovery_log(struct spdk_nvmf_tgt *tgt, const char *hostnqn);
void nvmf_get_discovery_log_page(struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
				 uint32_t iovcnt, uint64_t offset, uint32_t length,
				 struct spdk_nvme_transport_id *cmd_source_trid);

void nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req);
int nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req);
bool nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_copy_supported(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid);
bool nvmf_ctrlr_use_zcopy(struct spdk_nvmf_request *req);

void nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
				 bool dif_insert_or_strip);
int nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req,
		struct spdk_nvmf_request *write_req);
int nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_copy_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
bool nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
				 struct spdk_dif_ctx *dif_ctx);
bool nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev);

int nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
			     struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
				 struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem,
		bool stop);
struct spdk_nvmf_ctrlr *nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		uint16_t cntlid);
bool nvmf_subsystem_host_auth_required(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn);
struct spdk_nvmf_subsystem_listener *nvmf_subsystem_find_listener(
	struct spdk_nvmf_subsystem *subsystem,
	const struct spdk_nvme_transport_id *trid);
struct spdk_nvmf_listener *nvmf_transport_find_listener(
	struct spdk_nvmf_transport *transport,
	const struct spdk_nvme_transport_id *trid);
void nvmf_transport_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w,
			      bool named);
void nvmf_transport_listen_dump_trid(const struct spdk_nvme_transport_id *trid,
				     struct spdk_json_write_ctx *w);

/**
 * Sets the controller ID range for a subsystem.
 * Valid range is [1, 0xFFEF].
 *
 * May only be performed on subsystems in the INACTIVE state.
 *
 * \param subsystem Subsystem to modify.
 * \param min_cntlid Minimum controller ID.
 * \param max_cntlid Maximum controller ID.
 *
 * \return 0 on success, or negated errno value on failure.
 */
int nvmf_subsystem_set_cntlid_range(struct spdk_nvmf_subsystem *subsystem,
				    uint16_t min_cntlid, uint16_t max_cntlid);
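/*
 * Illustrative sketch only: a hypothetical check of the kind
 * nvmf_subsystem_set_cntlid_range() above is expected to apply, keeping the
 * range inside [NVMF_MIN_CNTLID, NVMF_MAX_CNTLID] and away from the
 * spec-reserved FFF0h-FFFFh values.
 */
static inline bool
nvmf_example_cntlid_range_valid(uint16_t min_cntlid, uint16_t max_cntlid)
{
	return min_cntlid >= NVMF_MIN_CNTLID && min_cntlid <= max_cntlid &&
	       max_cntlid <= NVMF_MAX_CNTLID;
}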
int nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_async_event_discovery_log_change_notice(void *ctx);
void nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr);

void nvmf_ns_reservation_request(void *ctx);
void nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
				       struct spdk_nvmf_ns *ns,
				       enum spdk_nvme_reservation_notification_log_page_type type);

bool nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns);

static inline struct spdk_nvmf_host *
nvmf_ns_find_host(struct spdk_nvmf_ns *ns, const char *hostnqn)
{
	struct spdk_nvmf_host *host = NULL;

	TAILQ_FOREACH(host, &ns->hosts, link) {
		if (strcmp(hostnqn, host->nqn) == 0) {
			return host;
		}
	}

	return NULL;
}

/*
 * Abort zero-copy requests that already got the buffer (received zcopy_start cb), but haven't
 * started zcopy_end. These requests are kept on the outstanding queue, but are not waiting for a
 * completion from the bdev layer, so, when a qpair is being disconnected, we need to kick them to
 * force their completion.
 */
void nvmf_qpair_abort_pending_zcopy_reqs(struct spdk_nvmf_qpair *qpair);

/*
 * nvmf_qpair_free_aer() simply frees the RDMA resources for the AER without informing the host.
 * It should be called when deleting a qpair to make sure the qpair is completely empty before
 * freeing the request. The reason we free the AER without sending a completion is to prevent
 * the host from sending another AER.
 */
void nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair);

int nvmf_ctrlr_abort_request(struct spdk_nvmf_request *req);

void nvmf_ctrlr_set_fatal_status(struct spdk_nvmf_ctrlr *ctrlr);

static inline bool
nvmf_ctrlr_ns_is_visible(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
{
	return spdk_bit_array_get(ctrlr->visible_ns, nsid - 1);
}

static inline struct spdk_nvmf_ns *
_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
	if (spdk_unlikely(nsid - 1 >= subsystem->max_nsid)) {
		return NULL;
	}

	return subsystem->ns[nsid - 1];
}

static inline struct spdk_nvmf_ns *
nvmf_ctrlr_get_ns(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
{
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvmf_ns *ns = _nvmf_subsystem_get_ns(subsystem, nsid);

	return ns && nvmf_ctrlr_ns_is_visible(ctrlr, nsid) ? ns : NULL;
}
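/*
 * Illustrative sketch only: a hypothetical helper showing how
 * _nvmf_subsystem_get_ns() combines with the per-controller visibility bit
 * array through nvmf_ctrlr_get_ns() above, here to count the namespaces a
 * controller can actually see.
 */
static inline uint32_t
nvmf_example_ctrlr_num_visible_ns(struct spdk_nvmf_ctrlr *ctrlr)
{
	uint32_t nsid, count = 0;

	for (nsid = 1; nsid <= ctrlr->subsys->max_nsid; nsid++) {
		if (nvmf_ctrlr_get_ns(ctrlr, nsid) != NULL) {
			count++;
		}
	}

	return count;
}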
static inline bool
nvmf_qpair_is_admin_queue(struct spdk_nvmf_qpair *qpair)
{
	return qpair->qid == 0;
}

void nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair, enum spdk_nvmf_qpair_state state);

int nvmf_qpair_auth_init(struct spdk_nvmf_qpair *qpair);
void nvmf_qpair_auth_destroy(struct spdk_nvmf_qpair *qpair);

int nvmf_auth_request_exec(struct spdk_nvmf_request *req);
bool nvmf_auth_is_supported(void);

static inline bool
nvmf_request_is_fabric_connect(struct spdk_nvmf_request *req)
{
	return req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC &&
	       req->cmd->nvmf_cmd.fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT;
}

/*
 * Tests whether a given string represents a valid NQN.
 */
bool nvmf_nqn_is_valid(const char *nqn);

/*
 * Tests whether a given NQN describes a discovery subsystem.
 */
bool nvmf_nqn_is_discovery(const char *nqn);

/**
 * Initiates a zcopy start operation
 *
 * \param bdev The \ref spdk_bdev
 * \param desc The \ref spdk_bdev_desc
 * \param ch The \ref spdk_io_channel
 * \param req The \ref spdk_nvmf_request passed to the bdev for processing
 *
 * \return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE if the command was completed immediately or
 *         SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS if the command was submitted and will be
 *         completed asynchronously. Asynchronous completions are notified through
 *         spdk_nvmf_request_complete().
 */
int nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
				struct spdk_bdev_desc *desc,
				struct spdk_io_channel *ch,
				struct spdk_nvmf_request *req);

/**
 * Ends a zcopy operation
 *
 * \param req The NVMe-oF request
 * \param commit Flag indicating whether the buffers should be committed
 */
void nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit);

#endif /* __NVMF_INTERNAL_H__ */