/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef __NVMF_INTERNAL_H__
#define __NVMF_INTERNAL_H__

#include "spdk/stdinc.h"

#include "spdk/keyring.h"
#include "spdk/likely.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/nvmf_transport.h"
#include "spdk/nvmf_spec.h"
#include "spdk/assert.h"
#include "spdk/bdev.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/thread.h"
#include "spdk/tree.h"
#include "spdk/bit_array.h"

/* The spec reserves cntlid values in the range FFF0h to FFFFh. */
#define NVMF_MIN_CNTLID 1
#define NVMF_MAX_CNTLID 0xFFEF

enum spdk_nvmf_tgt_state {
	NVMF_TGT_IDLE = 0,
	NVMF_TGT_RUNNING,
	NVMF_TGT_PAUSING,
	NVMF_TGT_PAUSED,
	NVMF_TGT_RESUMING,
};

enum spdk_nvmf_subsystem_state {
	SPDK_NVMF_SUBSYSTEM_INACTIVE = 0,
	SPDK_NVMF_SUBSYSTEM_ACTIVATING,
	SPDK_NVMF_SUBSYSTEM_ACTIVE,
	SPDK_NVMF_SUBSYSTEM_PAUSING,
	SPDK_NVMF_SUBSYSTEM_PAUSED,
	SPDK_NVMF_SUBSYSTEM_RESUMING,
	SPDK_NVMF_SUBSYSTEM_DEACTIVATING,
	SPDK_NVMF_SUBSYSTEM_NUM_STATES,
};

RB_HEAD(subsystem_tree, spdk_nvmf_subsystem);

struct spdk_nvmf_tgt {
	char name[NVMF_TGT_NAME_MAX_LENGTH];

	pthread_mutex_t mutex;

	uint64_t discovery_genctr;

	uint32_t max_subsystems;

	uint32_t discovery_filter;

	enum spdk_nvmf_tgt_state state;

	struct spdk_bit_array *subsystem_ids;

	struct subsystem_tree subsystems;

	TAILQ_HEAD(, spdk_nvmf_transport) transports;
	TAILQ_HEAD(, spdk_nvmf_poll_group) poll_groups;
	TAILQ_HEAD(, spdk_nvmf_referral) referrals;

	/* Used for round-robin assignment of connections to poll groups */
	struct spdk_nvmf_poll_group *next_poll_group;

	spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn;
	void *destroy_cb_arg;

	uint16_t crdt[3];
	uint16_t num_poll_groups;

	/* Allowed DH-HMAC-CHAP digests/dhgroups */
	uint32_t dhchap_digests;
	uint32_t dhchap_dhgroups;

	TAILQ_ENTRY(spdk_nvmf_tgt) link;
};

struct spdk_nvmf_host {
	char nqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	struct spdk_key *dhchap_key;
	struct spdk_key *dhchap_ctrlr_key;
	TAILQ_ENTRY(spdk_nvmf_host) link;
};

struct spdk_nvmf_subsystem_listener {
	struct spdk_nvmf_subsystem *subsystem;
	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
	void *cb_arg;
	struct spdk_nvme_transport_id *trid;
	struct spdk_nvmf_transport *transport;
	enum spdk_nvme_ana_state *ana_state;
	uint64_t ana_state_change_count;
	uint16_t id;
	struct spdk_nvmf_listener_opts opts;
	TAILQ_ENTRY(spdk_nvmf_subsystem_listener) link;
};

struct spdk_nvmf_referral {
	/* Discovery Log Page Entry for this referral */
	struct spdk_nvmf_discovery_log_page_entry entry;
	/* Transport ID */
	struct spdk_nvme_transport_id trid;
	TAILQ_ENTRY(spdk_nvmf_referral) link;
};
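/*
 * Illustrative sketch (a hypothetical helper, not part of this header): how a
 * new connection could be assigned a poll group round-robin via
 * spdk_nvmf_tgt::next_poll_group above. Assumes the poll group's list linkage
 * is the TAILQ_ENTRY named "link" and that the caller holds whatever
 * synchronization the target requires.
 *
 *	static struct spdk_nvmf_poll_group *
 *	get_next_poll_group(struct spdk_nvmf_tgt *tgt)
 *	{
 *		struct spdk_nvmf_poll_group *pg = tgt->next_poll_group;
 *
 *		if (pg == NULL) {
 *			pg = TAILQ_FIRST(&tgt->poll_groups);
 *		}
 *		if (pg != NULL) {
 *			tgt->next_poll_group = TAILQ_NEXT(pg, link);
 *		}
 *
 *		return pg;
 *	}
 */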
struct spdk_nvmf_subsystem_pg_ns_info {
	struct spdk_io_channel *channel;
	struct spdk_uuid uuid;
	/* current reservation key, no reservation if the value is 0 */
	uint64_t crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* Host ID which holds the reservation */
	struct spdk_uuid holder_id;
	/* Host IDs of the registrants with the namespace */
	struct spdk_uuid reg_hostid[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint64_t num_blocks;
	uint32_t anagrpid;

	/* I/O outstanding to this namespace */
	uint64_t io_outstanding;
	enum spdk_nvmf_subsystem_state state;
};

typedef void (*spdk_nvmf_poll_group_mod_done)(void *cb_arg, int status);

struct spdk_nvmf_subsystem_poll_group {
	/* Array of namespace information for each namespace indexed by nsid - 1 */
	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
	uint32_t num_ns;
	enum spdk_nvmf_subsystem_state state;

	/* Number of ADMIN and FABRICS requests outstanding */
	uint64_t mgmt_io_outstanding;
	spdk_nvmf_poll_group_mod_done cb_fn;
	void *cb_arg;

	TAILQ_HEAD(, spdk_nvmf_request) queued;
};

struct spdk_nvmf_registrant {
	TAILQ_ENTRY(spdk_nvmf_registrant) link;
	struct spdk_uuid hostid;
	/* Registration key */
	uint64_t rkey;
};

struct spdk_nvmf_ns {
	uint32_t nsid;
	uint32_t anagrpid;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_nvmf_ns_opts opts;
	/* reservation notification mask */
	uint32_t mask;
	/* generation code */
	uint32_t gen;
	/* registrants head */
	TAILQ_HEAD(, spdk_nvmf_registrant) registrants;
	/* current reservation key */
	uint64_t crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* current reservation holder; only valid for reservation types that allow a single holder */
	struct spdk_nvmf_registrant *holder;
	/* Persist Through Power Loss file which contains the persistent reservation */
	char *ptpl_file;
	/* Persist Through Power Loss feature is enabled */
	bool ptpl_activated;
	/* ZCOPY supported on bdev device */
	bool zcopy;
	/* Command Set Identifier */
	enum spdk_nvme_csi csi;
	/* Make namespace visible to controllers of these hosts */
	TAILQ_HEAD(, spdk_nvmf_host) hosts;
	/* Namespace is always visible to all controllers */
	bool always_visible;
	/* Namespace ID of the underlying device, used for passthrough commands */
	uint32_t passthrough_nsid;
};

/*
 * NVMf reservation notification log page.
 */
struct spdk_nvmf_reservation_log {
	struct spdk_nvme_reservation_notification_log log;
	TAILQ_ENTRY(spdk_nvmf_reservation_log) link;
	struct spdk_nvmf_ctrlr *ctrlr;
};

/*
 * NVMf async event completion.
 */
struct spdk_nvmf_async_event_completion {
	union spdk_nvme_async_event_completion event;
	STAILQ_ENTRY(spdk_nvmf_async_event_completion) link;
};
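/*
 * Illustrative sketch (a hypothetical helper, not part of this header):
 * looking up a reservation registrant by host ID on the per-namespace
 * registrants list declared above. spdk_uuid_compare() is provided by
 * spdk/uuid.h.
 *
 *	static struct spdk_nvmf_registrant *
 *	find_registrant(struct spdk_nvmf_ns *ns, const struct spdk_uuid *hostid)
 *	{
 *		struct spdk_nvmf_registrant *reg;
 *
 *		TAILQ_FOREACH(reg, &ns->registrants, link) {
 *			if (spdk_uuid_compare(&reg->hostid, hostid) == 0) {
 *				return reg;
 *			}
 *		}
 *
 *		return NULL;
 *	}
 */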
/*
 * This structure represents an NVMe-oF controller,
 * which is like a "session" in networking terms.
 */
struct spdk_nvmf_ctrlr {
	uint16_t cntlid;
	char hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	struct spdk_nvmf_subsystem *subsys;
	struct spdk_bit_array *visible_ns;

	struct spdk_nvmf_ctrlr_data cdata;

	struct spdk_nvmf_registers vcprop;

	struct spdk_nvmf_ctrlr_feat feat;

	struct spdk_nvmf_qpair *admin_qpair;
	struct spdk_thread *thread;
	struct spdk_bit_array *qpair_mask;

	const struct spdk_nvmf_subsystem_listener *listener;

	struct spdk_nvmf_request *aer_req[SPDK_NVMF_MAX_ASYNC_EVENTS];
	STAILQ_HEAD(, spdk_nvmf_async_event_completion) async_events;
	uint64_t notice_aen_mask;
	uint8_t nr_aer_reqs;
	struct spdk_uuid hostid;

	uint32_t association_timeout; /* in milliseconds */
	uint16_t changed_ns_list_count;
	struct spdk_nvme_ns_list changed_ns_list;
	uint64_t log_page_count;
	uint8_t num_avail_log_pages;
	TAILQ_HEAD(log_page_head, spdk_nvmf_reservation_log) log_head;

	/* Time to trigger keep-alive: poller_time = now_tick + period */
	uint64_t last_keep_alive_tick;
	struct spdk_poller *keep_alive_poller;

	struct spdk_poller *association_timer;

	struct spdk_poller *cc_timer;
	uint64_t cc_timeout_tsc;
	struct spdk_poller *cc_timeout_timer;

	bool dif_insert_or_strip;
	bool in_destruct;
	bool disconnect_in_progress;
	/* valid only when disconnect_in_progress is true */
	bool disconnect_is_shn;
	bool acre_enabled;
	bool dynamic_ctrlr;
	/* LBA Format Extension Enabled (LBAFEE) */
	bool lbafee_enabled;

	TAILQ_ENTRY(spdk_nvmf_ctrlr) link;
};

#define NVMF_MAX_LISTENERS_PER_SUBSYSTEM 16

struct nvmf_subsystem_state_change_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	uint16_t nsid;

	enum spdk_nvmf_subsystem_state original_state;
	enum spdk_nvmf_subsystem_state requested_state;
	int status;
	struct spdk_thread *thread;

	spdk_nvmf_subsystem_state_change_done cb_fn;
	void *cb_arg;
	TAILQ_ENTRY(nvmf_subsystem_state_change_ctx) link;
};
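/*
 * Illustrative sketch (a hypothetical helper, not part of this header): how a
 * keep-alive poller could use spdk_nvmf_ctrlr::last_keep_alive_tick above.
 * kato_ticks, the keep-alive timeout converted to ticks, is a hypothetical
 * parameter here; spdk_get_ticks() comes from spdk/env.h.
 *
 *	static bool
 *	keep_alive_expired(const struct spdk_nvmf_ctrlr *ctrlr, uint64_t kato_ticks)
 *	{
 *		return spdk_get_ticks() > ctrlr->last_keep_alive_tick + kato_ticks;
 *	}
 */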
struct spdk_nvmf_subsystem {
	struct spdk_thread *thread;

	uint32_t id;

	enum spdk_nvmf_subsystem_state state;
	enum spdk_nvmf_subtype subtype;

	uint16_t next_cntlid;
	struct {
		uint8_t allow_any_listener : 1;
		uint8_t ana_reporting : 1;
		uint8_t reserved : 6;
	} flags;

	/* Protected against concurrent access by ->mutex */
	bool allow_any_host;

	bool destroying;
	bool async_destroy;

	/* FDP related fields */
	bool fdp_supported;

	/* Zoned storage related fields */
	uint64_t max_zone_append_size_kib;

	struct spdk_nvmf_tgt *tgt;
	RB_ENTRY(spdk_nvmf_subsystem) link;

	/* Array of pointers to namespaces of size max_nsid indexed by nsid - 1 */
	struct spdk_nvmf_ns **ns;
	uint32_t max_nsid;

	uint16_t min_cntlid;
	uint16_t max_cntlid;

	uint64_t max_discard_size_kib;
	uint64_t max_write_zeroes_size_kib;

	TAILQ_HEAD(, spdk_nvmf_ctrlr) ctrlrs;

	/* This mutex protects fields that aren't touched on the I/O path (e.g. it's
	 * needed for handling things like the CONNECT command) instead of requiring the subsystem
	 * to be paused. It makes it possible to modify those fields (e.g. add/remove hosts)
	 * without affecting outstanding I/O requests.
	 */
	pthread_mutex_t mutex;
	/* Protected against concurrent access by ->mutex */
	TAILQ_HEAD(, spdk_nvmf_host) hosts;
	TAILQ_HEAD(, spdk_nvmf_subsystem_listener) listeners;
	struct spdk_bit_array *used_listener_ids;

	TAILQ_ENTRY(spdk_nvmf_subsystem) entries;

	nvmf_subsystem_destroy_cb async_destroy_cb;
	void *async_destroy_cb_arg;

	char sn[SPDK_NVME_CTRLR_SN_LEN + 1];
	char mn[SPDK_NVME_CTRLR_MN_LEN + 1];
	char subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];

	/* Array of namespace counts per ANA group, of size max_nsid, indexed by anagrpid - 1.
	 * Sizing it like the namespace array is sufficient, since there cannot be more ANA
	 * groups than namespaces.
	 */
	uint32_t *ana_group;
	/* Queue of state change requests */
	TAILQ_HEAD(, nvmf_subsystem_state_change_ctx) state_changes;
	/* In-band authentication sequence number, protected by ->mutex */
	uint32_t auth_seqnum;
	bool passthrough;
};

static int
subsystem_cmp(struct spdk_nvmf_subsystem *subsystem1, struct spdk_nvmf_subsystem *subsystem2)
{
	return strncmp(subsystem1->subnqn, subsystem2->subnqn, sizeof(subsystem1->subnqn));
}

RB_GENERATE_STATIC(subsystem_tree, spdk_nvmf_subsystem, link, subsystem_cmp);
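/*
 * Illustrative sketch (a hypothetical helper, not part of this header):
 * because subsystem_cmp() orders the tree by subnqn, a subsystem can be looked
 * up by NQN with RB_FIND (from spdk/tree.h) and a stack-allocated key:
 *
 *	static struct spdk_nvmf_subsystem *
 *	find_subsystem_by_nqn(struct spdk_nvmf_tgt *tgt, const char *subnqn)
 *	{
 *		struct spdk_nvmf_subsystem find = {};
 *
 *		snprintf(find.subnqn, sizeof(find.subnqn), "%s", subnqn);
 *		return RB_FIND(subsystem_tree, &tgt->subsystems, &find);
 *	}
 */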
int nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem);
int nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
				  struct spdk_nvmf_subsystem *subsystem,
				  spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem,
				     uint32_t nsid,
				     spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);

void nvmf_update_discovery_log(struct spdk_nvmf_tgt *tgt, const char *hostnqn);
void nvmf_get_discovery_log_page(struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
				 uint32_t iovcnt, uint64_t offset, uint32_t length,
				 struct spdk_nvme_transport_id *cmd_source_trid);

void nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req);
int nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req);
bool nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_copy_supported(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid);
bool nvmf_ctrlr_use_zcopy(struct spdk_nvmf_request *req);

void nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
				 bool dif_insert_or_strip);
void nvmf_bdev_ctrlr_identify_iocs_nvm(struct spdk_nvmf_ns *ns,
				       struct spdk_nvme_nvm_ns_data *nsdata_nvm);
int nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req);
int nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_copy_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
bool nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
				 struct spdk_dif_ctx *dif_ctx);
bool nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev);

int nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
			     struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
				 struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem,
		bool stop);
struct spdk_nvmf_ctrlr *nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		uint16_t cntlid);
bool nvmf_subsystem_host_auth_required(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn);

enum nvmf_auth_key_type {
	NVMF_AUTH_KEY_HOST,
	NVMF_AUTH_KEY_CTRLR,
};

struct spdk_key *nvmf_subsystem_get_dhchap_key(struct spdk_nvmf_subsystem *subsys, const char *nqn,
		enum nvmf_auth_key_type type);
struct spdk_nvmf_subsystem_listener *nvmf_subsystem_find_listener(
	struct spdk_nvmf_subsystem *subsystem,
	const struct spdk_nvme_transport_id *trid);
bool nvmf_subsystem_zone_append_supported(struct spdk_nvmf_subsystem *subsystem);
struct spdk_nvmf_listener *nvmf_transport_find_listener(
	struct spdk_nvmf_transport *transport,
	const struct spdk_nvme_transport_id *trid);
void nvmf_transport_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w,
			      bool named);
void nvmf_transport_listen_dump_trid(const struct spdk_nvme_transport_id *trid,
				     struct spdk_json_write_ctx *w);

int nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_async_event_discovery_log_change_notice(void *ctx);
void nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr);

void nvmf_ns_reservation_request(void *ctx);
void nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
				       struct spdk_nvmf_ns *ns,
				       enum spdk_nvme_reservation_notification_log_page_type type);

bool nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns);

static inline struct spdk_nvmf_host *
nvmf_ns_find_host(struct spdk_nvmf_ns *ns, const char *hostnqn)
{
	struct spdk_nvmf_host *host = NULL;

	TAILQ_FOREACH(host, &ns->hosts, link) {
		if (strcmp(hostnqn, host->nqn) == 0) {
			return host;
		}
	}

	return NULL;
}
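/*
 * Illustrative sketch (a hypothetical helper, not part of this header): how
 * nvmf_ns_find_host() above could combine with spdk_nvmf_ns::always_visible
 * to decide whether a namespace should be visible to a given host.
 *
 *	static bool
 *	ns_visible_to_host(struct spdk_nvmf_ns *ns, const char *hostnqn)
 *	{
 *		return ns->always_visible || nvmf_ns_find_host(ns, hostnqn) != NULL;
 *	}
 */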
/*
 * Abort zero-copy requests that already got a buffer (received the zcopy_start callback), but
 * haven't started zcopy_end. These requests are kept on the outstanding queue, but are not waiting
 * for a completion from the bdev layer, so, when a qpair is being disconnected, we need to kick
 * them to force their completion.
 */
void nvmf_qpair_abort_pending_zcopy_reqs(struct spdk_nvmf_qpair *qpair);

/*
 * Frees the AER's transport (e.g. RDMA) resources without informing the host.
 * This function should be called when deleting a qpair, when one wants to make sure
 * the qpair is completely empty before freeing the request. The reason the AER is
 * freed without sending a completion is to prevent the host from sending another AER.
 */
void nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair);

int nvmf_ctrlr_abort_request(struct spdk_nvmf_request *req);

void nvmf_ctrlr_set_fatal_status(struct spdk_nvmf_ctrlr *ctrlr);

static inline bool
nvmf_ctrlr_ns_is_visible(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
{
	return spdk_bit_array_get(ctrlr->visible_ns, nsid - 1);
}

static inline struct spdk_nvmf_ns *
_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
	if (spdk_unlikely(nsid - 1 >= subsystem->max_nsid)) {
		return NULL;
	}

	return subsystem->ns[nsid - 1];
}

static inline struct spdk_nvmf_ns *
nvmf_ctrlr_get_ns(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
{
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvmf_ns *ns = _nvmf_subsystem_get_ns(subsystem, nsid);

	return ns && nvmf_ctrlr_ns_is_visible(ctrlr, nsid) ? ns : NULL;
}

static inline bool
nvmf_qpair_is_admin_queue(struct spdk_nvmf_qpair *qpair)
{
	return qpair->qid == 0;
}

void nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair, enum spdk_nvmf_qpair_state state);

int nvmf_qpair_auth_init(struct spdk_nvmf_qpair *qpair);
void nvmf_qpair_auth_destroy(struct spdk_nvmf_qpair *qpair);
void nvmf_qpair_auth_dump(struct spdk_nvmf_qpair *qpair, struct spdk_json_write_ctx *w);

int nvmf_auth_request_exec(struct spdk_nvmf_request *req);
bool nvmf_auth_is_supported(void);

static inline bool
nvmf_request_is_fabric_connect(struct spdk_nvmf_request *req)
{
	return req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC &&
	       req->cmd->nvmf_cmd.fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT;
}

/*
 * Tests whether a given string represents a valid NQN.
 */
bool nvmf_nqn_is_valid(const char *nqn);

/*
 * Tests whether a given NQN describes a discovery subsystem.
 */
bool nvmf_nqn_is_discovery(const char *nqn);

/**
 * Initiates a zcopy start operation
 *
 * \param bdev The \ref spdk_bdev
 * \param desc The \ref spdk_bdev_desc
 * \param ch The \ref spdk_io_channel
 * \param req The \ref spdk_nvmf_request passed to the bdev for processing
 *
 * \return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE if the command was completed immediately or
 *         SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS if the command was submitted and will be
 *         completed asynchronously. Asynchronous completions are notified through
 *         spdk_nvmf_request_complete().
 */
int nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
				struct spdk_bdev_desc *desc,
				struct spdk_io_channel *ch,
				struct spdk_nvmf_request *req);

/**
 * Ends a zcopy operation
 *
 * \param req The NVMe-oF request
 * \param commit Flag indicating whether the buffers should be committed
 */
void nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit);
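/*
 * Illustrative zero-copy flow (a sketch under assumptions, not a fixed
 * contract): a transport would first check nvmf_ctrlr_use_zcopy(req), start
 * the operation with nvmf_bdev_ctrlr_zcopy_start(), transmit or receive the
 * data through the bdev-provided buffers once the start completes, and
 * finally release the buffers with nvmf_bdev_ctrlr_zcopy_end(), passing
 * commit = true when write data should be persisted:
 *
 *	rc = nvmf_bdev_ctrlr_zcopy_start(bdev, desc, ch, req);
 *	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS) {
 *		(wait for spdk_nvmf_request_complete() to be called)
 *	}
 *	(transfer data using the buffers attached to req, then:)
 *	nvmf_bdev_ctrlr_zcopy_end(req, commit);
 */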
/**
 * Publishes the mDNS PRR (Pull Registration Request) for the NVMe-oF target.
 *
 * \param tgt The NVMe-oF target
 *
 * \return 0 on success, negative errno on failure
 */
int nvmf_publish_mdns_prr(struct spdk_nvmf_tgt *tgt);

/**
 * Stops the mDNS PRR (Pull Registration Request) for the NVMe-oF target.
 *
 * \param tgt The NVMe-oF target
 */
void nvmf_tgt_stop_mdns_prr(struct spdk_nvmf_tgt *tgt);

/**
 * Updates the listener list in the mDNS PRR (Pull Registration Request) for the NVMe-oF target.
 *
 * \param tgt The NVMe-oF target
 *
 * \return 0 on success, negative errno on failure
 */
int nvmf_tgt_update_mdns_prr(struct spdk_nvmf_tgt *tgt);

static inline struct spdk_nvmf_transport_poll_group *
nvmf_get_transport_poll_group(struct spdk_nvmf_poll_group *group,
			      struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_transport_poll_group *tgroup;

	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (tgroup->transport == transport) {
			return tgroup;
		}
	}

	return NULL;
}

/**
 * Generates a new NVMe-oF controller ID.
 *
 * \param subsystem The subsystem
 *
 * \return unique controller ID, or 0xFFFF when all controller IDs are in use
 */
uint16_t nvmf_subsystem_gen_cntlid(struct spdk_nvmf_subsystem *subsystem);

#endif /* __NVMF_INTERNAL_H__ */