/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2015 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
 */

#include "nvme_internal.h"

static inline struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns,
        struct spdk_nvme_qpair *qpair,
        const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
        uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
        void *cb_arg, uint32_t opc, uint32_t io_flags,
        uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
        void *accel_sequence, int *rc);

static bool
nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
        uint32_t sectors_per_stripe, uint32_t qdepth)
{
    uint32_t child_per_io = UINT32_MAX;

    /* After a namespace is destroyed (e.g. on hotplug removal), all of the fields
     * associated with the namespace are cleared to zero. In that case this function
     * returns true, and the caller maps the failure to -EINVAL.
     */
    if (sectors_per_stripe > 0) {
        child_per_io = (lba_count + sectors_per_stripe - 1) / sectors_per_stripe;
    } else if (sectors_per_max_io > 0) {
        child_per_io = (lba_count + sectors_per_max_io - 1) / sectors_per_max_io;
    }

    SPDK_DEBUGLOG(nvme, "checking maximum i/o length %d\n", child_per_io);

    return child_per_io >= qdepth;
}

static inline int
nvme_ns_map_failure_rc(uint32_t lba_count, uint32_t sectors_per_max_io,
        uint32_t sectors_per_stripe, uint32_t qdepth, int rc)
{
    assert(rc);
    if (rc == -ENOMEM &&
        nvme_ns_check_request_length(lba_count, sectors_per_max_io, sectors_per_stripe, qdepth)) {
        return -EINVAL;
    }
    return rc;
}

static inline bool
_nvme_md_excluded_from_xfer(struct spdk_nvme_ns *ns, uint32_t io_flags)
{
    return (io_flags & SPDK_NVME_IO_FLAGS_PRACT) &&
           (ns->flags & SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED) &&
           (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) &&
           (ns->md_size == 8);
}

static inline uint32_t
_nvme_get_host_buffer_sector_size(struct spdk_nvme_ns *ns, uint32_t io_flags)
{
    return _nvme_md_excluded_from_xfer(ns, io_flags) ?
           ns->sector_size : ns->extended_lba_size;
}

static inline uint32_t
_nvme_get_sectors_per_max_io(struct spdk_nvme_ns *ns, uint32_t io_flags)
{
    return _nvme_md_excluded_from_xfer(ns, io_flags) ?
           ns->sectors_per_max_io_no_md : ns->sectors_per_max_io;
}
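
/*
 * Illustrative note (not part of the upstream file): when PRACT is set on a namespace that
 * is formatted with extended LBAs and 8-byte protection information, the controller inserts
 * or strips the PI itself, so the host buffer holds only the data portion of each block.
 * A minimal sketch of the effect, assuming 512-byte data blocks with 8 bytes of metadata:
 *
 *     uint32_t flags = SPDK_NVME_IO_FLAGS_PRACT;
 *     uint32_t host_block = _nvme_get_host_buffer_sector_size(ns, flags);
 *     // host_block == 512 when _nvme_md_excluded_from_xfer() is true,
 *     // otherwise 520 (data and metadata interleaved in the host buffer).
 */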

static struct nvme_request *
_nvme_add_child_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        const struct nvme_payload *payload,
        uint32_t payload_offset, uint32_t md_offset,
        uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
        uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
        struct nvme_request *parent, bool check_sgl, int *rc)
{
    struct nvme_request *child;

    child = _nvme_ns_cmd_rw(ns, qpair, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
                            cb_arg, opc, io_flags, apptag_mask, apptag, cdw13, check_sgl, NULL, rc);
    if (child == NULL) {
        nvme_request_free_children(parent);
        nvme_free_request(parent);
        return NULL;
    }

    nvme_request_add_child(parent, child);
    return child;
}

static struct nvme_request *
_nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
        struct spdk_nvme_qpair *qpair,
        const struct nvme_payload *payload,
        uint32_t payload_offset, uint32_t md_offset,
        uint64_t lba, uint32_t lba_count,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
        uint32_t io_flags, struct nvme_request *req,
        uint32_t sectors_per_max_io, uint32_t sector_mask,
        uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
        void *accel_sequence, int *rc)
{
    uint32_t sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
    uint32_t remaining_lba_count = lba_count;
    struct nvme_request *child;

    if (spdk_unlikely(accel_sequence != NULL)) {
        SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
        *rc = -EINVAL;
        return NULL;
    }

    while (remaining_lba_count > 0) {
        lba_count = sectors_per_max_io - (lba & sector_mask);
        lba_count = spdk_min(remaining_lba_count, lba_count);

        child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
                                        lba, lba_count, cb_fn, cb_arg, opc,
                                        io_flags, apptag_mask, apptag, cdw13, req, true, rc);
        if (child == NULL) {
            return NULL;
        }

        remaining_lba_count -= lba_count;
        lba += lba_count;
        payload_offset += lba_count * sector_size;
        md_offset += lba_count * ns->md_size;
    }

    return req;
}

static inline bool
_is_io_flags_valid(uint32_t io_flags)
{
    if (spdk_unlikely(io_flags & ~SPDK_NVME_IO_FLAGS_VALID_MASK)) {
        /* Invalid io_flags */
        SPDK_ERRLOG("Invalid io_flags 0x%x\n", io_flags);
        return false;
    }

    return true;
}

static inline bool
_is_accel_sequence_valid(struct spdk_nvme_qpair *qpair, void *seq)
{
    /* An accel sequence can only be executed if the controller supports accel and the
     * qpair is part of a poll group */
    if (spdk_likely(seq == NULL || ((qpair->ctrlr->flags & SPDK_NVME_CTRLR_ACCEL_SEQUENCE_SUPPORTED) &&
                                    qpair->poll_group != NULL))) {
        return true;
    }

    return false;
}

static void
_nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
        uint32_t opc, uint64_t lba, uint32_t lba_count,
        uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag,
        uint32_t cdw13)
{
    struct spdk_nvme_cmd *cmd;

    assert(_is_io_flags_valid(io_flags));

    cmd = &req->cmd;
    cmd->opc = opc;
    cmd->nsid = ns->id;

    *(uint64_t *)&cmd->cdw10 = lba;

    if (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) {
        switch (ns->pi_type) {
        case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
        case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
            cmd->cdw14 = (uint32_t)lba;
            break;
        }
    }

    cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);

    cmd->cdw12 = lba_count - 1;
    cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);

    cmd->cdw13 = cdw13;

    cmd->cdw15 = apptag_mask;
    cmd->cdw15 = (cmd->cdw15 << 16 | apptag);
}
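
/*
 * Worked example (illustrative, not upstream code): for an 8-block read starting at LBA
 * 0x1000 on a namespace without protection information, _nvme_ns_cmd_setup_request() leaves
 * the submission queue entry with:
 *
 *     cdw10/cdw11 = 0x1000                     // 64-bit starting LBA
 *     cdw12       = 7                          // NLB is 0-based: lba_count - 1
 *     cdw13       = 0                          // DSM/directive fields, if requested
 *     cdw14       = 0                          // initial reference tag (PI type 1/2 only)
 *     cdw15       = apptag_mask << 16 | apptag
 */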

static struct nvme_request *
_nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
        struct spdk_nvme_qpair *qpair,
        const struct nvme_payload *payload,
        uint32_t payload_offset, uint32_t md_offset,
        uint64_t lba, uint32_t lba_count,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
        uint32_t io_flags, struct nvme_request *req,
        uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
        void *accel_sequence, int *rc)
{
    spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
    spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
    void *sgl_cb_arg = req->payload.contig_or_cb_arg;
    bool start_valid, end_valid, last_sge, child_equals_parent;
    uint64_t child_lba = lba;
    uint32_t req_current_length = 0;
    uint32_t child_length = 0;
    uint32_t sge_length;
    uint32_t page_size = qpair->ctrlr->page_size;
    uintptr_t address;

    reset_sgl_fn(sgl_cb_arg, payload_offset);
    next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
    while (req_current_length < req->payload_size) {

        if (sge_length == 0) {
            continue;
        } else if (req_current_length + sge_length > req->payload_size) {
            sge_length = req->payload_size - req_current_length;
        }

        /*
         * The start of the SGE is invalid if the start address is not page aligned,
         * unless it is the first SGE in the child request.
         */
        start_valid = child_length == 0 || _is_page_aligned(address, page_size);

        /* Boolean for whether this is the last SGE in the parent request. */
        last_sge = (req_current_length + sge_length == req->payload_size);

        /*
         * The end of the SGE is invalid if the end address is not page aligned,
         * unless it is the last SGE in the parent request.
         */
        end_valid = last_sge || _is_page_aligned(address + sge_length, page_size);

        /*
         * This child request equals the parent request, meaning that no splitting
         * was required for the parent request (the one passed into this function).
         * In this case, we do not create a child request at all - we just send
         * the original request as a single request at the end of this function.
         */
        child_equals_parent = (child_length + sge_length == req->payload_size);

        if (start_valid) {
            /*
             * The start of the SGE is valid, so advance the length parameters,
             * to include this SGE with previous SGEs for this child request
             * (if any).  If it is not valid, we do not advance the length
             * parameters nor get the next SGE, because we must send what has
             * been collected before this SGE as a child request.
             */
            child_length += sge_length;
            req_current_length += sge_length;
            if (req_current_length < req->payload_size) {
                next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
                /*
                 * If the next SGE is not page aligned, we will need to create a
                 * child request for what we have so far, and then start a new
                 * child request for the next SGE.
                 */
                start_valid = _is_page_aligned(address, page_size);
            }
        }

        if (start_valid && end_valid && !last_sge) {
            continue;
        }

        /*
         * We need to create a split here.  Send what we have accumulated so far as a child
         * request.  Checking if child_equals_parent allows us to *not* create a child request
         * when no splitting is required - in that case we will fall-through and just create
         * a single request with no children for the entire I/O.
         */
        if (!child_equals_parent) {
            struct nvme_request *child;
            uint32_t child_lba_count;

            if ((child_length % ns->extended_lba_size) != 0) {
                SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
                            child_length, ns->extended_lba_size);
                *rc = -EINVAL;
                return NULL;
            }
            if (spdk_unlikely(accel_sequence != NULL)) {
                SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
                *rc = -EINVAL;
                return NULL;
            }

            child_lba_count = child_length / ns->extended_lba_size;
            /*
             * Note the last parameter is set to "false" - this tells the recursive
             * call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
             * since we have already verified it here.
             */
            child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
                                            child_lba, child_lba_count,
                                            cb_fn, cb_arg, opc, io_flags,
                                            apptag_mask, apptag, cdw13, req, false, rc);
            if (child == NULL) {
                return NULL;
            }
            payload_offset += child_length;
            md_offset += child_lba_count * ns->md_size;
            child_lba += child_lba_count;
            child_length = 0;
        }
    }

    if (child_length == req->payload_size) {
        /* No splitting was required, so setup the whole payload as one request. */
        _nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
    }

    return req;
}
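
/*
 * Illustrative sketch (assuming a 4 KiB controller page size and a 512-byte formatted
 * namespace): PRP lists cannot describe interior SGEs that start or end off a page
 * boundary, which is what the loop above checks for.  An I/O built from
 *
 *     { .addr = 0x200000, .len = 0x1800 }   // ends mid-page
 *     { .addr = 0x300000, .len = 0x0800 }   // starts on a fresh page
 *
 * is therefore sent as two child requests (12 blocks + 4 blocks), while the same 8 KiB
 * described by two page-aligned 4 KiB elements goes out as a single request.
 */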

static struct nvme_request *
_nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
        struct spdk_nvme_qpair *qpair,
        const struct nvme_payload *payload,
        uint32_t payload_offset, uint32_t md_offset,
        uint64_t lba, uint32_t lba_count,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
        uint32_t io_flags, struct nvme_request *req,
        uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
        void *accel_sequence, int *rc)
{
    spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
    spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
    void *sgl_cb_arg = req->payload.contig_or_cb_arg;
    uint64_t child_lba = lba;
    uint32_t req_current_length = 0;
    uint32_t accumulated_length = 0;
    uint32_t sge_length;
    uint16_t max_sges, num_sges;
    uintptr_t address;

    max_sges = ns->ctrlr->max_sges;

    reset_sgl_fn(sgl_cb_arg, payload_offset);
    num_sges = 0;

    while (req_current_length < req->payload_size) {
        next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);

        if (req_current_length + sge_length > req->payload_size) {
            sge_length = req->payload_size - req_current_length;
        }

        accumulated_length += sge_length;
        req_current_length += sge_length;
        num_sges++;

        if (num_sges < max_sges && req_current_length < req->payload_size) {
            continue;
        }

        /*
         * We need to create a split here.  Send what we have accumulated so far as a child
         * request.  Checking if the child equals the full payload allows us to *not*
         * create a child request when no splitting is required - in that case we will
         * fall-through and just create a single request with no children for the entire I/O.
         */
        if (accumulated_length != req->payload_size) {
            struct nvme_request *child;
            uint32_t child_lba_count;
            uint32_t child_length;
            uint32_t extra_length;

            child_length = accumulated_length;
            /* Child length may not be a multiple of the block size! */
            child_lba_count = child_length / ns->extended_lba_size;
            extra_length = child_length - (child_lba_count * ns->extended_lba_size);
            if (extra_length != 0) {
                /* The last SGE does not end on a block boundary.  We need to cut it off. */
                if (extra_length >= child_length) {
                    SPDK_ERRLOG("Unable to send I/O. Would require more than the supported number of "
                                "SGL Elements.\n");
                    *rc = -EINVAL;
                    return NULL;
                }
                child_length -= extra_length;
            }

            if (spdk_unlikely(accel_sequence != NULL)) {
                SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
                *rc = -EINVAL;
                return NULL;
            }

            /*
             * Note the last parameter is set to "false" - this tells the recursive
             * call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
             * since we have already verified it here.
             */
            child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
                                            child_lba, child_lba_count,
                                            cb_fn, cb_arg, opc, io_flags,
                                            apptag_mask, apptag, cdw13, req, false, rc);
            if (child == NULL) {
                return NULL;
            }
            payload_offset += child_length;
            md_offset += child_lba_count * ns->md_size;
            child_lba += child_lba_count;
            accumulated_length -= child_length;
            num_sges = accumulated_length > 0;
        }
    }

    if (accumulated_length == req->payload_size) {
        /* No splitting was required, so setup the whole payload as one request. */
        _nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
    }

    return req;
}
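
/*
 * Illustrative example (assumes a controller reporting max_sges == 2): a 16 KiB I/O handed
 * to the driver as four 4 KiB iovecs is accumulated two SGEs at a time, so the loop above
 * emits two 8 KiB child requests.  With max_sges >= 4, or with a single contiguous buffer,
 * the same I/O is submitted untouched as one request.
 */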

static inline struct nvme_request *
_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
        uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
        uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
        void *accel_sequence, int *rc)
{
    struct nvme_request *req;
    uint32_t sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
    uint32_t sectors_per_max_io = _nvme_get_sectors_per_max_io(ns, io_flags);
    uint32_t sectors_per_stripe = ns->sectors_per_stripe;

    assert(rc != NULL);
    assert(*rc == 0);

    req = nvme_allocate_request(qpair, payload, lba_count * sector_size, lba_count * ns->md_size,
                                cb_fn, cb_arg);
    if (req == NULL) {
        *rc = -ENOMEM;
        return NULL;
    }

    req->payload_offset = payload_offset;
    req->md_offset = md_offset;
    req->accel_sequence = accel_sequence;

    /* Zone append commands cannot be split. */
    if (opc == SPDK_NVME_OPC_ZONE_APPEND) {
        assert(ns->csi == SPDK_NVME_CSI_ZNS);
        /*
         * As long as we disable driver-assisted striping for Zone append commands,
         * _nvme_ns_cmd_rw() should never cause a proper request to be split.
         * If a request does get split regardless, the error is handled in the callers.
         */
        sectors_per_stripe = 0;
    }

    /*
     * Intel DC P3*00 NVMe controllers benefit from driver-assisted striping.
     * If this controller defines a stripe boundary and this I/O spans a stripe
     * boundary, split the request into multiple requests and submit each
     * separately to hardware.
     */
    if (sectors_per_stripe > 0 &&
        (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {
        return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
                                          cb_fn, cb_arg, opc,
                                          io_flags, req, sectors_per_stripe, sectors_per_stripe - 1,
                                          apptag_mask, apptag, cdw13, accel_sequence, rc);
    } else if (lba_count > sectors_per_max_io) {
        return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
                                          cb_fn, cb_arg, opc,
                                          io_flags, req, sectors_per_max_io, 0, apptag_mask,
                                          apptag, cdw13, accel_sequence, rc);
    } else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL && check_sgl) {
        if (ns->ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
            return _nvme_ns_cmd_split_request_sgl(ns, qpair, payload, payload_offset, md_offset,
                                                  lba, lba_count, cb_fn, cb_arg, opc, io_flags,
                                                  req, apptag_mask, apptag, cdw13,
                                                  accel_sequence, rc);
        } else {
            return _nvme_ns_cmd_split_request_prp(ns, qpair, payload, payload_offset, md_offset,
                                                  lba, lba_count, cb_fn, cb_arg, opc, io_flags,
                                                  req, apptag_mask, apptag, cdw13,
                                                  accel_sequence, rc);
        }
    }

    _nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
    return req;
}
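
/*
 * Worked example of the striping check above (illustrative): on a controller that reports a
 * 128-block stripe, a 64-block I/O starting at LBA 96 crosses a stripe boundary because
 * (96 & 127) + 64 = 160 > 128.  _nvme_ns_cmd_split_request() then issues two children:
 * 32 blocks at LBA 96 (up to the boundary) and 32 blocks at LBA 128.
 */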

int
spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
        uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
        uint32_t io_flags)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    int rc = 0;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
                          SPDK_NVME_OPC_COMPARE, io_flags, 0, 0, 0, false, NULL, &rc);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }
}

int
spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        void *buffer, void *metadata, uint64_t lba,
        uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
        uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    int rc = 0;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
                          SPDK_NVME_OPC_COMPARE, io_flags,
                          apptag_mask, apptag, 0, false, NULL, &rc);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }
}

int
spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        uint64_t lba, uint32_t lba_count,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
        spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
        spdk_nvme_req_next_sge_cb next_sge_fn)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    int rc = 0;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
                          SPDK_NVME_OPC_COMPARE, io_flags, 0, 0, 0, true, NULL, &rc);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        uint64_t lba, uint32_t lba_count,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
        spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
        spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
        uint16_t apptag_mask, uint16_t apptag)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    int rc = 0;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
                          SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, 0, true,
                          NULL, &rc);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }
}

int
spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
        uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
        uint32_t io_flags)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    int rc = 0;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
                          io_flags, 0, 0, 0, false, NULL, &rc);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
        void *metadata, uint64_t lba,
        uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
        uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    int rc = 0;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
                          io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }
}
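
/*
 * Usage sketch (illustrative, not part of this file): a typical caller allocates a DMA-able
 * buffer, submits the read, and polls the queue pair for the completion.  read_done() and
 * the done flag are placeholders for application code.
 *
 *     static void
 *     read_done(void *arg, const struct spdk_nvme_cpl *cpl)
 *     {
 *         bool *done = arg;
 *
 *         if (spdk_nvme_cpl_is_error(cpl)) {
 *             fprintf(stderr, "read failed\n");
 *         }
 *         *done = true;
 *     }
 *
 *     buf = spdk_zmalloc(4096, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
 *     rc = spdk_nvme_ns_cmd_read(ns, qpair, buf, 0, 8, read_done, &done, 0);
 *     while (rc == 0 && !done) {
 *         spdk_nvme_qpair_process_completions(qpair, 0);
 *     }
 */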

static int
nvme_ns_cmd_rw_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
        uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
        struct spdk_nvme_ns_cmd_ext_io_opts *opts, enum spdk_nvme_nvm_opcode opc)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    void *seq;
    int rc = 0;

    assert(opc == SPDK_NVME_OPC_READ || opc == SPDK_NVME_OPC_WRITE);
    assert(opts);

    payload = NVME_PAYLOAD_CONTIG(buffer, opts->metadata);

    if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
        return -EINVAL;
    }

    seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
    if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
        return -EINVAL;
    }

    payload.opts = opts;

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, opts->io_flags,
                          opts->apptag_mask, opts->apptag, 0, false, seq, &rc);
    if (spdk_unlikely(req == NULL)) {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
        uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
        struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
    return nvme_ns_cmd_rw_ext(ns, qpair, buffer, lba, lba_count, cb_fn, cb_arg, opts,
                              SPDK_NVME_OPC_READ);
}

int
spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        uint64_t lba, uint32_t lba_count,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
        spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
        spdk_nvme_req_next_sge_cb next_sge_fn)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    int rc = 0;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
                          io_flags, 0, 0, 0, true, NULL, &rc);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        uint64_t lba, uint32_t lba_count,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
        spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
        spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
        uint16_t apptag_mask, uint16_t apptag)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    int rc = 0;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
                          io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }
}
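
/*
 * Sketch of the SGL callback pair consumed by the vectored variants (illustrative; the
 * iovec bookkeeping is application code, not part of the driver).  reset_sgl() seeks to a
 * byte offset within the described buffer, and next_sge() hands back one element at a time.
 *
 *     struct sgl_ctx { struct iovec *iov; int iovcnt; int idx; size_t offset; };
 *
 *     static void
 *     reset_sgl(void *ref, uint32_t sgl_offset)
 *     {
 *         struct sgl_ctx *ctx = ref;
 *
 *         ctx->idx = 0;
 *         ctx->offset = sgl_offset;
 *         while (ctx->idx < ctx->iovcnt && ctx->offset >= ctx->iov[ctx->idx].iov_len) {
 *             ctx->offset -= ctx->iov[ctx->idx++].iov_len;
 *         }
 *     }
 *
 *     static int
 *     next_sge(void *ref, void **address, uint32_t *length)
 *     {
 *         struct sgl_ctx *ctx = ref;
 *
 *         *address = (uint8_t *)ctx->iov[ctx->idx].iov_base + ctx->offset;
 *         *length = ctx->iov[ctx->idx].iov_len - ctx->offset;
 *         ctx->offset = 0;
 *         ctx->idx++;
 *         return 0;
 *     }
 */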

static int
nvme_ns_cmd_rwv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
        uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
        spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
        spdk_nvme_req_next_sge_cb next_sge_fn, struct spdk_nvme_ns_cmd_ext_io_opts *opts,
        enum spdk_nvme_nvm_opcode opc)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    void *seq;
    int rc = 0;

    assert(opc == SPDK_NVME_OPC_READ || opc == SPDK_NVME_OPC_WRITE);

    if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

    if (opts) {
        if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
            return -EINVAL;
        }

        seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
        if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
            return -EINVAL;
        }

        payload.opts = opts;
        payload.md = opts->metadata;
        req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, opts->io_flags,
                              opts->apptag_mask, opts->apptag, opts->cdw13, true, seq, &rc);

    } else {
        req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, 0, 0, 0, 0,
                              true, NULL, &rc);
    }

    if (req == NULL) {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
        void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
        spdk_nvme_req_next_sge_cb next_sge_fn,
        struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
    return nvme_ns_cmd_rwv_ext(ns, qpair, lba, lba_count, cb_fn, cb_arg, reset_sgl_fn, next_sge_fn,
                               opts, SPDK_NVME_OPC_READ);
}

int
spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        void *buffer, uint64_t lba,
        uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
        uint32_t io_flags)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    int rc = 0;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
                          io_flags, 0, 0, 0, false, NULL, &rc);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }
}
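
/*
 * Sketch combining the extended I/O options with the SGL callbacks above (illustrative;
 * option values, io_done and iovs are placeholders).  opts.size must be set so the library
 * knows which fields are present; accel_sequence is left unset (NULL) here.
 *
 *     struct spdk_nvme_ns_cmd_ext_io_opts opts = {
 *         .size = SPDK_SIZEOF(&opts, accel_sequence),
 *         .io_flags = 0,
 *         .metadata = NULL,
 *     };
 *     struct sgl_ctx ctx = { .iov = iovs, .iovcnt = iovcnt };
 *
 *     rc = spdk_nvme_ns_cmd_readv_ext(ns, qpair, lba, lba_count, io_done, &ctx,
 *                                     reset_sgl, next_sge, &opts);
 */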

static int
nvme_ns_cmd_check_zone_append(struct spdk_nvme_ns *ns, uint32_t lba_count, uint32_t io_flags)
{
    uint32_t sector_size;

    /* Not all NVMe Zoned Namespaces support the zone append command. */
    if (!(ns->ctrlr->flags & SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED)) {
        return -EINVAL;
    }

    sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);

    /* Fail a too large zone append command early. */
    if (lba_count * sector_size > ns->ctrlr->max_zone_append_size) {
        return -EINVAL;
    }

    return 0;
}

int
nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        void *buffer, void *metadata, uint64_t zslba,
        uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
        uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    int rc = 0;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
    if (rc) {
        return rc;
    }

    payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
                          SPDK_NVME_OPC_ZONE_APPEND,
                          io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
    if (req != NULL) {
        /*
         * Zone append commands cannot be split (num_children has to be 0).
         * For NVME_PAYLOAD_TYPE_CONTIG, _nvme_ns_cmd_rw() should never cause a split
         * to happen, since a too large request would have already been failed by
         * nvme_ns_cmd_check_zone_append(), since zasl <= mdts.
         */
        assert(req->num_children == 0);
        if (req->num_children) {
            nvme_request_free_children(req);
            nvme_free_request(req);
            return -EINVAL;
        }
        return nvme_qpair_submit_request(qpair, req);
    } else {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }
}

int
nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        uint64_t zslba, uint32_t lba_count,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
        spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
        spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
        uint16_t apptag_mask, uint16_t apptag)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    int rc = 0;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
        return -EINVAL;
    }

    rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
    if (rc) {
        return rc;
    }

    payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
                          SPDK_NVME_OPC_ZONE_APPEND,
                          io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
    if (req != NULL) {
        /*
         * Zone append commands cannot be split (num_children has to be 0).
         * For NVME_PAYLOAD_TYPE_SGL, _nvme_ns_cmd_rw() can cause a split.
         * However, _nvme_ns_cmd_split_request_sgl() and _nvme_ns_cmd_split_request_prp()
         * do not always cause a request to be split. These functions verify payload size,
         * verify num sge < max_sge, and verify SGE alignment rules (in case of PRPs).
         * If any of the verifications fail, they will split the request.
         * In our case, a split is very unlikely, since we already verified the size using
         * nvme_ns_cmd_check_zone_append(), however, we still need to call these functions
         * in order to perform the verification part. If they do cause a split, we return
         * an error here. For proper requests, these functions will never cause a split.
         */
        if (req->num_children) {
            nvme_request_free_children(req);
            nvme_free_request(req);
            return -EINVAL;
        }
        return nvme_qpair_submit_request(qpair, req);
    } else {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        void *buffer, void *metadata, uint64_t lba,
        uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
        uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    int rc = 0;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
                          io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }
}

int
spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        void *buffer, uint64_t lba,
        uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
        struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
    return nvme_ns_cmd_rw_ext(ns, qpair, buffer, lba, lba_count, cb_fn, cb_arg, opts,
                              SPDK_NVME_OPC_WRITE);
}

int
spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        uint64_t lba, uint32_t lba_count,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
        spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
        spdk_nvme_req_next_sge_cb next_sge_fn)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    int rc = 0;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
                          io_flags, 0, 0, 0, true, NULL, &rc);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        uint64_t lba, uint32_t lba_count,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
        spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
        spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
        uint16_t apptag_mask, uint16_t apptag)
{
    struct nvme_request *req;
    struct nvme_payload payload;
    int rc = 0;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
                          io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else {
        return nvme_ns_map_failure_rc(lba_count,
                                      ns->sectors_per_max_io,
                                      ns->sectors_per_stripe,
                                      qpair->ctrlr->opts.io_queue_requests,
                                      rc);
    }
}

int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
        uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
        spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
        spdk_nvme_req_next_sge_cb next_sge_fn,
        struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
    return nvme_ns_cmd_rwv_ext(ns, qpair, lba, lba_count, cb_fn, cb_arg, reset_sgl_fn, next_sge_fn,
                               opts, SPDK_NVME_OPC_WRITE);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        uint64_t lba, uint32_t lba_count,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg,
        uint32_t io_flags)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;
    uint64_t *tmp_lba;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
        return -EINVAL;
    }

    req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_WRITE_ZEROES;
    cmd->nsid = ns->id;

    tmp_lba = (uint64_t *)&cmd->cdw10;
    *tmp_lba = lba;
    cmd->cdw12 = lba_count - 1;
    cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
    cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_verify(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        uint64_t lba, uint32_t lba_count,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg,
        uint32_t io_flags)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    if (!_is_io_flags_valid(io_flags)) {
        return -EINVAL;
    }

    if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
        return -EINVAL;
    }

    req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_VERIFY;
    cmd->nsid = ns->id;

    *(uint64_t *)&cmd->cdw10 = lba;
    cmd->cdw12 = lba_count - 1;
    cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
    cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_write_uncorrectable(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        uint64_t lba, uint32_t lba_count,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;
    uint64_t *tmp_lba;

    if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
        return -EINVAL;
    }

    req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_WRITE_UNCORRECTABLE;
    cmd->nsid = ns->id;

    tmp_lba = (uint64_t *)&cmd->cdw10;
    *tmp_lba = lba;
    cmd->cdw12 = lba_count - 1;

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        uint32_t type,
        const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    if (num_ranges == 0 || num_ranges > SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES) {
        return -EINVAL;
    }

    if (ranges == NULL) {
        return -EINVAL;
    }

    req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
                                          num_ranges * sizeof(struct spdk_nvme_dsm_range),
                                          cb_fn, cb_arg, true);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
    cmd->nsid = ns->id;

    cmd->cdw10_bits.dsm.nr = num_ranges - 1;
    cmd->cdw11 = type;

    return nvme_qpair_submit_request(qpair, req);
}
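
/*
 * Usage sketch (illustrative): deallocating ("trimming") 1 MiB starting at LBA 2048 on a
 * 512-byte namespace.  The range array is copied into a driver-owned buffer, so it can
 * live on the caller's stack; dsm_done is a placeholder completion callback.
 *
 *     struct spdk_nvme_dsm_range range = {
 *         .starting_lba = 2048,
 *         .length = 2048,      // expressed in LBAs: 1 MiB / 512 bytes
 *     };
 *
 *     rc = spdk_nvme_ns_cmd_dataset_management(ns, qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
 *                                              &range, 1, dsm_done, NULL);
 */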

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        const struct spdk_nvme_scc_source_range *ranges,
        uint16_t num_ranges, uint64_t dest_lba,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    if (num_ranges == 0) {
        return -EINVAL;
    }

    if (ranges == NULL) {
        return -EINVAL;
    }

    req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
                                          num_ranges * sizeof(struct spdk_nvme_scc_source_range),
                                          cb_fn, cb_arg, true);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_COPY;
    cmd->nsid = ns->id;

    *(uint64_t *)&cmd->cdw10 = dest_lba;
    cmd->cdw12 = num_ranges - 1;

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_FLUSH;
    cmd->nsid = ns->id;

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
        struct spdk_nvme_qpair *qpair,
        struct spdk_nvme_reservation_register_data *payload,
        bool ignore_key,
        enum spdk_nvme_reservation_register_action action,
        enum spdk_nvme_reservation_register_cptpl cptpl,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    req = nvme_allocate_request_user_copy(qpair,
                                          payload, sizeof(struct spdk_nvme_reservation_register_data),
                                          cb_fn, cb_arg, true);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_RESERVATION_REGISTER;
    cmd->nsid = ns->id;

    cmd->cdw10_bits.resv_register.rrega = action;
    cmd->cdw10_bits.resv_register.iekey = ignore_key;
    cmd->cdw10_bits.resv_register.cptpl = cptpl;

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
        struct spdk_nvme_qpair *qpair,
        struct spdk_nvme_reservation_key_data *payload,
        bool ignore_key,
        enum spdk_nvme_reservation_release_action action,
        enum spdk_nvme_reservation_type type,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    req = nvme_allocate_request_user_copy(qpair,
                                          payload, sizeof(struct spdk_nvme_reservation_key_data), cb_fn,
                                          cb_arg, true);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
    cmd->nsid = ns->id;

    cmd->cdw10_bits.resv_release.rrela = action;
    cmd->cdw10_bits.resv_release.iekey = ignore_key;
    cmd->cdw10_bits.resv_release.rtype = type;

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
        struct spdk_nvme_qpair *qpair,
        struct spdk_nvme_reservation_acquire_data *payload,
        bool ignore_key,
        enum spdk_nvme_reservation_acquire_action action,
        enum spdk_nvme_reservation_type type,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    req = nvme_allocate_request_user_copy(qpair,
                                          payload, sizeof(struct spdk_nvme_reservation_acquire_data),
                                          cb_fn, cb_arg, true);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;
    cmd->nsid = ns->id;

    cmd->cdw10_bits.resv_acquire.racqa = action;
    cmd->cdw10_bits.resv_acquire.iekey = ignore_key;
    cmd->cdw10_bits.resv_acquire.rtype = type;

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns,
        struct spdk_nvme_qpair *qpair,
        void *payload, uint32_t len,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    uint32_t num_dwords;
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    if (len & 0x3) {
        return -EINVAL;
    }

    req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_RESERVATION_REPORT;
    cmd->nsid = ns->id;

    num_dwords = (len >> 2);
    cmd->cdw10 = num_dwords - 1; /* 0-based */

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_io_mgmt_recv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        void *payload, uint32_t len, uint8_t mo, uint16_t mos,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    uint32_t num_dwords;
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    if (len & 0x3) {
        return -EINVAL;
    }

    req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_RECEIVE;
    cmd->nsid = ns->id;

    cmd->cdw10_bits.mgmt_send_recv.mo = mo;
    cmd->cdw10_bits.mgmt_send_recv.mos = mos;

    num_dwords = (len >> 2);
    cmd->cdw11 = num_dwords - 1; /* 0-based */

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_io_mgmt_send(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        void *payload, uint32_t len, uint8_t mo, uint16_t mos,
        spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    /* I/O Management Send transfers data from host to controller, so the user buffer
     * must be copied toward the controller. */
    req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, true);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_SEND;
    cmd->nsid = ns->id;

    cmd->cdw10_bits.mgmt_send_recv.mo = mo;
    cmd->cdw10_bits.mgmt_send_recv.mos = mos;

    return nvme_qpair_submit_request(qpair, req);
}