/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef __NVMF_INTERNAL_H__
#define __NVMF_INTERNAL_H__

#include "spdk/stdinc.h"

#include "spdk/likely.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/nvmf_transport.h"
#include "spdk/nvmf_spec.h"
#include "spdk/assert.h"
#include "spdk/bdev.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/thread.h"
#include "spdk/tree.h"
#include "spdk/bit_array.h"

/* The spec reserves cntlid values in the range FFF0h to FFFFh. */
#define NVMF_MIN_CNTLID 1
#define NVMF_MAX_CNTLID 0xFFEF

enum spdk_nvmf_tgt_state {
	NVMF_TGT_IDLE = 0,
	NVMF_TGT_RUNNING,
	NVMF_TGT_PAUSING,
	NVMF_TGT_PAUSED,
	NVMF_TGT_RESUMING,
};

enum spdk_nvmf_subsystem_state {
	SPDK_NVMF_SUBSYSTEM_INACTIVE = 0,
	SPDK_NVMF_SUBSYSTEM_ACTIVATING,
	SPDK_NVMF_SUBSYSTEM_ACTIVE,
	SPDK_NVMF_SUBSYSTEM_PAUSING,
	SPDK_NVMF_SUBSYSTEM_PAUSED,
	SPDK_NVMF_SUBSYSTEM_RESUMING,
	SPDK_NVMF_SUBSYSTEM_DEACTIVATING,
	SPDK_NVMF_SUBSYSTEM_NUM_STATES,
};

RB_HEAD(subsystem_tree, spdk_nvmf_subsystem);

struct spdk_nvmf_tgt {
	char name[NVMF_TGT_NAME_MAX_LENGTH];

	pthread_mutex_t mutex;

	uint64_t discovery_genctr;

	uint32_t max_subsystems;

	enum spdk_nvmf_tgt_discovery_filter discovery_filter;

	enum spdk_nvmf_tgt_state state;

	struct spdk_bit_array *subsystem_ids;

	struct subsystem_tree subsystems;

	TAILQ_HEAD(, spdk_nvmf_transport) transports;
	TAILQ_HEAD(, spdk_nvmf_poll_group) poll_groups;
	TAILQ_HEAD(, spdk_nvmf_referral) referrals;

	/* Used for round-robin assignment of connections to poll groups */
	struct spdk_nvmf_poll_group *next_poll_group;

	spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn;
	void *destroy_cb_arg;

	uint16_t crdt[3];
	uint16_t num_poll_groups;

	TAILQ_ENTRY(spdk_nvmf_tgt) link;
};

struct spdk_nvmf_host {
	char nqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	TAILQ_ENTRY(spdk_nvmf_host) link;
};

struct spdk_nvmf_subsystem_listener {
	struct spdk_nvmf_subsystem *subsystem;
	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
	void *cb_arg;
	struct spdk_nvme_transport_id *trid;
	struct spdk_nvmf_transport *transport;
	enum spdk_nvme_ana_state *ana_state;
	uint64_t ana_state_change_count;
	uint16_t id;
	struct spdk_nvmf_listener_opts opts;
	TAILQ_ENTRY(spdk_nvmf_subsystem_listener) link;
};

struct spdk_nvmf_referral {
	/* Discovery Log Page Entry for this referral */
	struct spdk_nvmf_discovery_log_page_entry entry;
	/* Transport ID */
	struct spdk_nvme_transport_id trid;
	TAILQ_ENTRY(spdk_nvmf_referral) link;
};

struct spdk_nvmf_subsystem_pg_ns_info {
	struct spdk_io_channel *channel;
	struct spdk_uuid uuid;
	/* current reservation key, no reservation if the value is 0 */
	uint64_t crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* Host ID which holds the reservation */
	struct spdk_uuid holder_id;
	/* Host IDs of the registrants of the namespace */
	struct spdk_uuid reg_hostid[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint64_t num_blocks;

	/* I/O outstanding to this namespace */
	uint64_t io_outstanding;
	enum spdk_nvmf_subsystem_state state;
};
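/*
 * Illustrative sketch (not a declaration in this header): one way a target
 * could use next_poll_group to round-robin new connections across its poll
 * groups. The helper name is hypothetical and locking is elided; it assumes
 * the poll group's TAILQ_ENTRY used by tgt->poll_groups is named "link".
 *
 *	static struct spdk_nvmf_poll_group *
 *	example_next_poll_group(struct spdk_nvmf_tgt *tgt)
 *	{
 *		struct spdk_nvmf_poll_group *pg = tgt->next_poll_group;
 *
 *		if (pg == NULL) {
 *			pg = TAILQ_FIRST(&tgt->poll_groups);
 *		}
 *		if (pg != NULL) {
 *			tgt->next_poll_group = TAILQ_NEXT(pg, link);
 *		}
 *		return pg;
 *	}
 */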
typedef void (*spdk_nvmf_poll_group_mod_done)(void *cb_arg, int status);

struct spdk_nvmf_subsystem_poll_group {
	/* Array of namespace information for each namespace indexed by nsid - 1 */
	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
	uint32_t num_ns;
	enum spdk_nvmf_subsystem_state state;

	/* Number of ADMIN and FABRICS requests outstanding */
	uint64_t mgmt_io_outstanding;
	spdk_nvmf_poll_group_mod_done cb_fn;
	void *cb_arg;

	TAILQ_HEAD(, spdk_nvmf_request) queued;
};

struct spdk_nvmf_registrant {
	TAILQ_ENTRY(spdk_nvmf_registrant) link;
	struct spdk_uuid hostid;
	/* Registration key */
	uint64_t rkey;
};

struct spdk_nvmf_ns {
	uint32_t nsid;
	uint32_t anagrpid;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_nvmf_ns_opts opts;
	/* reservation notification mask */
	uint32_t mask;
	/* generation code */
	uint32_t gen;
	/* registrants head */
	TAILQ_HEAD(, spdk_nvmf_registrant) registrants;
	/* current reservation key */
	uint64_t crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* current reservation holder, only valid if the reservation type allows only one holder */
	struct spdk_nvmf_registrant *holder;
	/* Persist Through Power Loss file which contains the persistent reservation */
	char *ptpl_file;
	/* Persist Through Power Loss feature is enabled */
	bool ptpl_activated;
	/* ZCOPY supported on bdev device */
	bool zcopy;
	/* Command Set Identifier */
	enum spdk_nvme_csi csi;
	/* Make namespace visible to controllers of these hosts */
	TAILQ_HEAD(, spdk_nvmf_host) hosts;
	/* Namespace is always visible to all controllers */
	bool always_visible;
};

/*
 * NVMf reservation notification log page.
 */
struct spdk_nvmf_reservation_log {
	struct spdk_nvme_reservation_notification_log log;
	TAILQ_ENTRY(spdk_nvmf_reservation_log) link;
	struct spdk_nvmf_ctrlr *ctrlr;
};

/*
 * NVMf async event completion.
 */
struct spdk_nvmf_async_event_completion {
	union spdk_nvme_async_event_completion event;
	STAILQ_ENTRY(spdk_nvmf_async_event_completion) link;
};
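/*
 * Illustrative sketch (hypothetical helper, error handling elided): when an
 * event fires while no AER request is outstanding, the completion can be
 * queued on ctrlr->async_events and reported once the host posts a new AER.
 *
 *	static void
 *	example_queue_async_event(struct spdk_nvmf_ctrlr *ctrlr,
 *				  union spdk_nvme_async_event_completion event)
 *	{
 *		struct spdk_nvmf_async_event_completion *pending;
 *
 *		pending = calloc(1, sizeof(*pending));
 *		if (pending != NULL) {
 *			pending->event = event;
 *			STAILQ_INSERT_TAIL(&ctrlr->async_events, pending, link);
 *		}
 *	}
 */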
203 */ 204 struct spdk_nvmf_ctrlr { 205 uint16_t cntlid; 206 char hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1]; 207 struct spdk_nvmf_subsystem *subsys; 208 struct spdk_bit_array *visible_ns; 209 210 struct spdk_nvmf_ctrlr_data cdata; 211 212 struct spdk_nvmf_registers vcprop; 213 214 struct spdk_nvmf_ctrlr_feat feat; 215 216 struct spdk_nvmf_qpair *admin_qpair; 217 struct spdk_thread *thread; 218 struct spdk_bit_array *qpair_mask; 219 220 const struct spdk_nvmf_subsystem_listener *listener; 221 222 struct spdk_nvmf_request *aer_req[SPDK_NVMF_MAX_ASYNC_EVENTS]; 223 STAILQ_HEAD(, spdk_nvmf_async_event_completion) async_events; 224 uint64_t notice_aen_mask; 225 uint8_t nr_aer_reqs; 226 struct spdk_uuid hostid; 227 228 uint32_t association_timeout; /* in milliseconds */ 229 uint16_t changed_ns_list_count; 230 struct spdk_nvme_ns_list changed_ns_list; 231 uint64_t log_page_count; 232 uint8_t num_avail_log_pages; 233 TAILQ_HEAD(log_page_head, spdk_nvmf_reservation_log) log_head; 234 235 /* Time to trigger keep-alive--poller_time = now_tick + period */ 236 uint64_t last_keep_alive_tick; 237 struct spdk_poller *keep_alive_poller; 238 239 struct spdk_poller *association_timer; 240 241 struct spdk_poller *cc_timer; 242 uint64_t cc_timeout_tsc; 243 struct spdk_poller *cc_timeout_timer; 244 245 bool dif_insert_or_strip; 246 bool in_destruct; 247 bool disconnect_in_progress; 248 /* valid only when disconnect_in_progress is true */ 249 bool disconnect_is_shn; 250 bool acre_enabled; 251 bool dynamic_ctrlr; 252 253 TAILQ_ENTRY(spdk_nvmf_ctrlr) link; 254 }; 255 256 #define NVMF_MAX_LISTENERS_PER_SUBSYSTEM 16 257 258 struct spdk_nvmf_subsystem { 259 struct spdk_thread *thread; 260 261 uint32_t id; 262 263 enum spdk_nvmf_subsystem_state state; 264 enum spdk_nvmf_subtype subtype; 265 266 uint16_t next_cntlid; 267 struct { 268 uint8_t allow_any_host : 1; 269 uint8_t allow_any_listener : 1; 270 uint8_t ana_reporting : 1; 271 uint8_t reserved : 5; 272 } flags; 273 274 /* boolean for state change synchronization */ 275 bool changing_state; 276 277 bool destroying; 278 bool async_destroy; 279 280 /* FDP related fields */ 281 bool fdp_supported; 282 283 /* Zoned storage related fields */ 284 bool zone_append_supported; 285 uint64_t max_zone_append_size_kib; 286 287 struct spdk_nvmf_tgt *tgt; 288 RB_ENTRY(spdk_nvmf_subsystem) link; 289 290 /* Array of pointers to namespaces of size max_nsid indexed by nsid - 1 */ 291 struct spdk_nvmf_ns **ns; 292 uint32_t max_nsid; 293 294 uint16_t min_cntlid; 295 uint16_t max_cntlid; 296 297 uint64_t max_discard_size_kib; 298 uint64_t max_write_zeroes_size_kib; 299 300 TAILQ_HEAD(, spdk_nvmf_ctrlr) ctrlrs; 301 302 /* A mutex used to protect the hosts list and allow_any_host flag. Unlike the namespace 303 * array, this list is not used on the I/O path (it's needed for handling things like 304 * the CONNECT command), so use a mutex to protect it instead of requiring the subsystem 305 * state to be paused. This removes the requirement to pause the subsystem when hosts 306 * are added or removed dynamically. 
struct spdk_nvmf_subsystem {
	struct spdk_thread *thread;

	uint32_t id;

	enum spdk_nvmf_subsystem_state state;
	enum spdk_nvmf_subtype subtype;

	uint16_t next_cntlid;
	struct {
		uint8_t allow_any_host : 1;
		uint8_t allow_any_listener : 1;
		uint8_t ana_reporting : 1;
		uint8_t reserved : 5;
	} flags;

	/* boolean for state change synchronization */
	bool changing_state;

	bool destroying;
	bool async_destroy;

	/* FDP related fields */
	bool fdp_supported;

	/* Zoned storage related fields */
	bool zone_append_supported;
	uint64_t max_zone_append_size_kib;

	struct spdk_nvmf_tgt *tgt;
	RB_ENTRY(spdk_nvmf_subsystem) link;

	/* Array of pointers to namespaces of size max_nsid indexed by nsid - 1 */
	struct spdk_nvmf_ns **ns;
	uint32_t max_nsid;

	uint16_t min_cntlid;
	uint16_t max_cntlid;

	uint64_t max_discard_size_kib;
	uint64_t max_write_zeroes_size_kib;

	TAILQ_HEAD(, spdk_nvmf_ctrlr) ctrlrs;

	/* A mutex used to protect the hosts list and allow_any_host flag. Unlike the namespace
	 * array, this list is not used on the I/O path (it's needed for handling things like
	 * the CONNECT command), so use a mutex to protect it instead of requiring the subsystem
	 * state to be paused. This removes the requirement to pause the subsystem when hosts
	 * are added or removed dynamically.
	 */
	pthread_mutex_t mutex;
	TAILQ_HEAD(, spdk_nvmf_host) hosts;
	TAILQ_HEAD(, spdk_nvmf_subsystem_listener) listeners;
	struct spdk_bit_array *used_listener_ids;

	TAILQ_ENTRY(spdk_nvmf_subsystem) entries;

	nvmf_subsystem_destroy_cb async_destroy_cb;
	void *async_destroy_cb_arg;

	char sn[SPDK_NVME_CTRLR_SN_LEN + 1];
	char mn[SPDK_NVME_CTRLR_MN_LEN + 1];
	char subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];

	/* Array of per-ANA-group namespace counts, of size max_nsid, indexed by anagrpid - 1.
	 * Since there can be at most one ANA group per namespace, sizing this array the same
	 * as the namespace array is sufficient.
	 */
	uint32_t *ana_group;
};

static int
subsystem_cmp(struct spdk_nvmf_subsystem *subsystem1, struct spdk_nvmf_subsystem *subsystem2)
{
	return strncmp(subsystem1->subnqn, subsystem2->subnqn, sizeof(subsystem1->subnqn));
}

RB_GENERATE_STATIC(subsystem_tree, spdk_nvmf_subsystem, link, subsystem_cmp);
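/*
 * Illustrative sketch (hypothetical helper name): looking up a subsystem by
 * NQN in the tree generated above. RB_FIND() compares against a key object,
 * so a stack-allocated subsystem with only subnqn filled in is enough for
 * subsystem_cmp().
 *
 *	static struct spdk_nvmf_subsystem *
 *	example_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
 *	{
 *		struct spdk_nvmf_subsystem key = {0};
 *
 *		snprintf(key.subnqn, sizeof(key.subnqn), "%s", subnqn);
 *		return RB_FIND(subsystem_tree, &tgt->subsystems, &key);
 *	}
 */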
int nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem);
int nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem,
		spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem,
		uint32_t nsid,
		spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);

void nvmf_update_discovery_log(struct spdk_nvmf_tgt *tgt, const char *hostnqn);
void nvmf_get_discovery_log_page(struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
		uint32_t iovcnt, uint64_t offset, uint32_t length,
		struct spdk_nvme_transport_id *cmd_source_trid);

void nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req);
int nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req);
bool nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_copy_supported(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid);
bool nvmf_ctrlr_use_zcopy(struct spdk_nvmf_request *req);

void nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
		bool dif_insert_or_strip);
int nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req);
int nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_copy_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
bool nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
		struct spdk_dif_ctx *dif_ctx);
bool nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev);

int nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem,
		bool stop);
struct spdk_nvmf_ctrlr *nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		uint16_t cntlid);
struct spdk_nvmf_subsystem_listener *nvmf_subsystem_find_listener(
		struct spdk_nvmf_subsystem *subsystem,
		const struct spdk_nvme_transport_id *trid);
struct spdk_nvmf_listener *nvmf_transport_find_listener(
		struct spdk_nvmf_transport *transport,
		const struct spdk_nvme_transport_id *trid);
void nvmf_transport_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w,
		bool named);
void nvmf_transport_listen_dump_trid(const struct spdk_nvme_transport_id *trid,
		struct spdk_json_write_ctx *w);

/**
 * Sets the controller ID range for a subsystem.
 * Valid range is [1, 0xFFEF].
 *
 * May only be performed on subsystems in the INACTIVE state.
 *
 * \param subsystem Subsystem to modify.
 * \param min_cntlid Minimum controller ID.
 * \param max_cntlid Maximum controller ID.
 *
 * \return 0 on success, or negated errno value on failure.
 */
int nvmf_subsystem_set_cntlid_range(struct spdk_nvmf_subsystem *subsystem,
		uint16_t min_cntlid, uint16_t max_cntlid);
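/*
 * Illustrative usage sketch (values arbitrary): the range may only be changed
 * while the subsystem is INACTIVE, and a negated errno is returned on failure.
 *
 *	int rc = nvmf_subsystem_set_cntlid_range(subsystem, 1, 0x100);
 *	if (rc != 0) {
 *		SPDK_ERRLOG("Failed to set cntlid range: %s\n", spdk_strerror(-rc));
 *	}
 */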
int nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_async_event_discovery_log_change_notice(void *ctx);
void nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr);

void nvmf_ns_reservation_request(void *ctx);
void nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
		struct spdk_nvmf_ns *ns,
		enum spdk_nvme_reservation_notification_log_page_type type);

bool nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns);

static inline struct spdk_nvmf_host *
nvmf_ns_find_host(struct spdk_nvmf_ns *ns, const char *hostnqn)
{
	struct spdk_nvmf_host *host = NULL;

	TAILQ_FOREACH(host, &ns->hosts, link) {
		if (strcmp(hostnqn, host->nqn) == 0) {
			return host;
		}
	}

	return NULL;
}

/*
 * Abort zero-copy requests that already got the buffer (received the zcopy_start callback), but
 * haven't started zcopy_end. These requests are kept on the outstanding queue, but are not
 * waiting for a completion from the bdev layer, so, when a qpair is being disconnected, we need
 * to kick them to force their completion.
 */
void nvmf_qpair_abort_pending_zcopy_reqs(struct spdk_nvmf_qpair *qpair);

/*
 * Free AER simply frees the RDMA resources for the AER without informing the host.
 * This function should be called when deleting a qpair when one wants to make sure
 * the qpair is completely empty before freeing the request. The reason we free the
 * AER without sending a completion is to prevent the host from sending another AER.
 */
void nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair);

int nvmf_ctrlr_abort_request(struct spdk_nvmf_request *req);

void nvmf_ctrlr_set_fatal_status(struct spdk_nvmf_ctrlr *ctrlr);

static inline bool
nvmf_ctrlr_ns_is_visible(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
{
	return spdk_bit_array_get(ctrlr->visible_ns, nsid - 1);
}

static inline struct spdk_nvmf_ns *
_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
	if (spdk_unlikely(nsid - 1 >= subsystem->max_nsid)) {
		return NULL;
	}

	return subsystem->ns[nsid - 1];
}

static inline struct spdk_nvmf_ns *
nvmf_ctrlr_get_ns(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
{
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvmf_ns *ns = _nvmf_subsystem_get_ns(subsystem, nsid);

	return ns && nvmf_ctrlr_ns_is_visible(ctrlr, nsid) ? ns : NULL;
}

static inline bool
nvmf_qpair_is_admin_queue(struct spdk_nvmf_qpair *qpair)
{
	return qpair->qid == 0;
}

static inline bool
nvmf_request_is_fabric_connect(struct spdk_nvmf_request *req)
{
	return req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC &&
	       req->cmd->nvmf_cmd.fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT;
}

/*
 * Tests whether a given string represents a valid NQN.
 */
bool nvmf_nqn_is_valid(const char *nqn);

/*
 * Tests whether a given NQN describes a discovery subsystem.
 */
bool nvmf_nqn_is_discovery(const char *nqn);

/**
 * Initiates a zcopy start operation
 *
 * \param bdev The \ref spdk_bdev
 * \param desc The \ref spdk_bdev_desc
 * \param ch The \ref spdk_io_channel
 * \param req The \ref spdk_nvmf_request passed to the bdev for processing
 *
 * \return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE if the command was completed immediately or
 *         SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS if the command was submitted and will be
 *         completed asynchronously. Asynchronous completions are notified through
 *         spdk_nvmf_request_complete().
 */
int nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
		struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch,
		struct spdk_nvmf_request *req);

/**
 * Ends a zcopy operation
 *
 * \param req The NVMe-oF request
 * \param commit Flag indicating whether the buffers should be committed
 */
void nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit);

#endif /* __NVMF_INTERNAL_H__ */