/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2015 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
 */

#include "nvme_internal.h"

static inline struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns,
		struct spdk_nvme_qpair *qpair,
		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
		void *cb_arg, uint32_t opc, uint32_t io_flags,
		uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
		void *accel_sequence, int *rc);

static bool
nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
			     uint32_t sectors_per_stripe, uint32_t qdepth)
{
	uint32_t child_per_io = UINT32_MAX;

	/* After a namespace is destroyed (e.g. hotplug), all the fields associated with the
	 * namespace are cleared to zero. In that case this function returns TRUE
	 * and -EINVAL is returned to the caller.
	 */
	if (sectors_per_stripe > 0) {
		child_per_io = (lba_count + sectors_per_stripe - 1) / sectors_per_stripe;
	} else if (sectors_per_max_io > 0) {
		child_per_io = (lba_count + sectors_per_max_io - 1) / sectors_per_max_io;
	}

	SPDK_DEBUGLOG(nvme, "checking maximum i/o length %d\n", child_per_io);

	return child_per_io >= qdepth;
}

static inline int
nvme_ns_map_failure_rc(uint32_t lba_count, uint32_t sectors_per_max_io,
		       uint32_t sectors_per_stripe, uint32_t qdepth, int rc)
{
	assert(rc);
	if (rc == -ENOMEM &&
	    nvme_ns_check_request_length(lba_count, sectors_per_max_io, sectors_per_stripe, qdepth)) {
		return -EINVAL;
	}
	return rc;
}

static inline bool
_nvme_md_excluded_from_xfer(struct spdk_nvme_ns *ns, uint32_t io_flags)
{
	return (io_flags & SPDK_NVME_IO_FLAGS_PRACT) &&
	       (ns->flags & SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED) &&
	       (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) &&
	       (ns->md_size == 8);
}

static inline uint32_t
_nvme_get_host_buffer_sector_size(struct spdk_nvme_ns *ns, uint32_t io_flags)
{
	return _nvme_md_excluded_from_xfer(ns, io_flags) ?
	       ns->sector_size : ns->extended_lba_size;
}

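/*
 * When PRACT causes the 8-byte PI metadata to be generated/stripped by the
 * controller, no metadata crosses the bus, so the larger no-metadata limit
 * on sectors per I/O applies.
 */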
static inline uint32_t
_nvme_get_sectors_per_max_io(struct spdk_nvme_ns *ns, uint32_t io_flags)
{
	return _nvme_md_excluded_from_xfer(ns, io_flags) ?
	       ns->sectors_per_max_io_no_md : ns->sectors_per_max_io;
}

static struct nvme_request *
_nvme_add_child_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			const struct nvme_payload *payload,
			uint32_t payload_offset, uint32_t md_offset,
			uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
			struct nvme_request *parent, bool check_sgl, int *rc)
{
	struct nvme_request *child;

	child = _nvme_ns_cmd_rw(ns, qpair, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
				cb_arg, opc, io_flags, apptag_mask, apptag, cdw13, check_sgl, NULL, rc);
	if (child == NULL) {
		nvme_request_free_children(parent);
		nvme_free_request(parent);
		return NULL;
	}

	nvme_request_add_child(parent, child);
	return child;
}

static struct nvme_request *
_nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
			   struct spdk_nvme_qpair *qpair,
			   const struct nvme_payload *payload,
			   uint32_t payload_offset, uint32_t md_offset,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			   uint32_t io_flags, struct nvme_request *req,
			   uint32_t sectors_per_max_io, uint32_t sector_mask,
			   uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
			   void *accel_sequence, int *rc)
{
	uint32_t sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
	uint32_t remaining_lba_count = lba_count;
	struct nvme_request *child;

	if (spdk_unlikely(accel_sequence != NULL)) {
		SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
		*rc = -EINVAL;
		return NULL;
	}

	while (remaining_lba_count > 0) {
		lba_count = sectors_per_max_io - (lba & sector_mask);
		lba_count = spdk_min(remaining_lba_count, lba_count);

		child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
						lba, lba_count, cb_fn, cb_arg, opc,
						io_flags, apptag_mask, apptag, cdw13, req, true, rc);
		if (child == NULL) {
			return NULL;
		}

		remaining_lba_count -= lba_count;
		lba += lba_count;
		payload_offset += lba_count * sector_size;
		md_offset += lba_count * ns->md_size;
	}

	return req;
}

static inline bool
_is_io_flags_valid(uint32_t io_flags)
{
	if (spdk_unlikely(io_flags & ~SPDK_NVME_IO_FLAGS_VALID_MASK)) {
		/* Invalid io_flags */
		SPDK_ERRLOG("Invalid io_flags 0x%x\n", io_flags);
		return false;
	}

	return true;
}

static inline bool
_is_accel_sequence_valid(struct spdk_nvme_qpair *qpair, void *seq)
{
	/* An accel sequence can only be executed if the controller supports accel and the qpair is
	 * part of a poll group */
	if (spdk_likely(seq == NULL || ((qpair->ctrlr->flags & SPDK_NVME_CTRLR_ACCEL_SEQUENCE_SUPPORTED) &&
					qpair->poll_group != NULL))) {
		return true;
	}

	return false;
}

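/*
 * Fill in the common NVM I/O command fields: opcode, namespace ID, starting
 * LBA (CDW10/11), 0-based NLB plus CDW12 flags, CDW13, the PI reference tag
 * (CDW14) for Type 1/2 protection, and the application tag/mask (CDW15).
 */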
static void
_nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
			   uint32_t opc, uint64_t lba, uint32_t lba_count,
			   uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag,
			   uint32_t cdw13)
{
	struct spdk_nvme_cmd *cmd;

	assert(_is_io_flags_valid(io_flags));

	cmd = &req->cmd;
	cmd->opc = opc;
	cmd->nsid = ns->id;

	*(uint64_t *)&cmd->cdw10 = lba;

	if (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) {
		switch (ns->pi_type) {
		case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
		case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
			cmd->cdw14 = (uint32_t)lba;
			break;
		}
	}

	cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);

	cmd->cdw12 = lba_count - 1;
	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);

	cmd->cdw13 = cdw13;

	cmd->cdw15 = apptag_mask;
	cmd->cdw15 = (cmd->cdw15 << 16 | apptag);
}

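/*
 * Split a scattered payload that cannot be described by a single PRP list.
 * The payload is cut at each misaligned SGE boundary so that within every
 * child request the first SGE may start unaligned, the last may end
 * unaligned, and all others are page aligned, as PRPs require.
 */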
static struct nvme_request *
_nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
			       struct spdk_nvme_qpair *qpair,
			       const struct nvme_payload *payload,
			       uint32_t payload_offset, uint32_t md_offset,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			       uint32_t io_flags, struct nvme_request *req,
			       uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
			       void *accel_sequence, int *rc)
{
	spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
	spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
	void *sgl_cb_arg = req->payload.contig_or_cb_arg;
	bool start_valid, end_valid, last_sge, child_equals_parent;
	uint64_t child_lba = lba;
	uint32_t req_current_length = 0;
	uint32_t child_length = 0;
	uint32_t sge_length;
	uint32_t page_size = qpair->ctrlr->page_size;
	uintptr_t address;

	reset_sgl_fn(sgl_cb_arg, payload_offset);
	next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
	while (req_current_length < req->payload_size) {

		if (sge_length == 0) {
			continue;
		} else if (req_current_length + sge_length > req->payload_size) {
			sge_length = req->payload_size - req_current_length;
		}

		/*
		 * The start of the SGE is invalid if the start address is not page aligned,
		 * unless it is the first SGE in the child request.
		 */
		start_valid = child_length == 0 || _is_page_aligned(address, page_size);

		/* Boolean for whether this is the last SGE in the parent request. */
		last_sge = (req_current_length + sge_length == req->payload_size);

		/*
		 * The end of the SGE is invalid if the end address is not page aligned,
		 * unless it is the last SGE in the parent request.
		 */
		end_valid = last_sge || _is_page_aligned(address + sge_length, page_size);

		/*
		 * This child request equals the parent request, meaning that no splitting
		 * was required for the parent request (the one passed into this function).
		 * In this case, we do not create a child request at all - we just send
		 * the original request as a single request at the end of this function.
		 */
		child_equals_parent = (child_length + sge_length == req->payload_size);

		if (start_valid) {
			/*
			 * The start of the SGE is valid, so advance the length parameters,
			 * to include this SGE with previous SGEs for this child request
			 * (if any). If it is not valid, we do not advance the length
			 * parameters nor get the next SGE, because we must send what has
			 * been collected before this SGE as a child request.
			 */
			child_length += sge_length;
			req_current_length += sge_length;
			if (req_current_length < req->payload_size) {
				next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
				/*
				 * If the next SGE is not page aligned, we will need to create a
				 * child request for what we have so far, and then start a new
				 * child request for the next SGE.
				 */
				start_valid = _is_page_aligned(address, page_size);
			}
		}

		if (start_valid && end_valid && !last_sge) {
			continue;
		}

		/*
		 * We need to create a split here. Send what we have accumulated so far as a child
		 * request. Checking if child_equals_parent allows us to *not* create a child request
		 * when no splitting is required - in that case we will fall-through and just create
		 * a single request with no children for the entire I/O.
		 */
		if (!child_equals_parent) {
			struct nvme_request *child;
			uint32_t child_lba_count;

			if ((child_length % ns->extended_lba_size) != 0) {
				SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
					    child_length, ns->extended_lba_size);
				*rc = -EINVAL;
				return NULL;
			}
			if (spdk_unlikely(accel_sequence != NULL)) {
				SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
				*rc = -EINVAL;
				return NULL;
			}

			child_lba_count = child_length / ns->extended_lba_size;
			/*
			 * Note the last parameter is set to "false" - this tells the recursive
			 * call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
			 * since we have already verified it here.
			 */
			child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
							child_lba, child_lba_count,
							cb_fn, cb_arg, opc, io_flags,
							apptag_mask, apptag, cdw13, req, false, rc);
			if (child == NULL) {
				return NULL;
			}
			payload_offset += child_length;
			md_offset += child_lba_count * ns->md_size;
			child_lba += child_lba_count;
			child_length = 0;
		}
	}

	if (child_length == req->payload_size) {
		/* No splitting was required, so setup the whole payload as one request. */
		_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
	}

	return req;
}

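/*
 * Split a scattered payload for controllers with native SGL support: children
 * are cut whenever the number of accumulated SGEs would exceed the
 * controller's max_sges, with no page-alignment requirement.
 */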
static struct nvme_request *
_nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
			       struct spdk_nvme_qpair *qpair,
			       const struct nvme_payload *payload,
			       uint32_t payload_offset, uint32_t md_offset,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			       uint32_t io_flags, struct nvme_request *req,
			       uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
			       void *accel_sequence, int *rc)
{
	spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
	spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
	void *sgl_cb_arg = req->payload.contig_or_cb_arg;
	uint64_t child_lba = lba;
	uint32_t req_current_length = 0;
	uint32_t child_length = 0;
	uint32_t sge_length;
	uint16_t max_sges, num_sges;
	uintptr_t address;

	max_sges = ns->ctrlr->max_sges;

	reset_sgl_fn(sgl_cb_arg, payload_offset);
	num_sges = 0;

	while (req_current_length < req->payload_size) {
		next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);

		if (req_current_length + sge_length > req->payload_size) {
			sge_length = req->payload_size - req_current_length;
		}

		child_length += sge_length;
		req_current_length += sge_length;
		num_sges++;

		if (num_sges < max_sges && req_current_length < req->payload_size) {
			continue;
		}

		/*
		 * We need to create a split here. Send what we have accumulated so far as a child
		 * request. Checking if the child equals the full payload allows us to *not*
		 * create a child request when no splitting is required - in that case we will
		 * fall-through and just create a single request with no children for the entire I/O.
		 */
		if (child_length != req->payload_size) {
			struct nvme_request *child;
			uint32_t child_lba_count;

			if ((child_length % ns->extended_lba_size) != 0) {
				SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
					    child_length, ns->extended_lba_size);
				*rc = -EINVAL;
				return NULL;
			}
			if (spdk_unlikely(accel_sequence != NULL)) {
				SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
				*rc = -EINVAL;
				return NULL;
			}

			child_lba_count = child_length / ns->extended_lba_size;
			/*
			 * Note the last parameter is set to "false" - this tells the recursive
			 * call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
			 * since we have already verified it here.
			 */
			child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
							child_lba, child_lba_count,
							cb_fn, cb_arg, opc, io_flags,
							apptag_mask, apptag, cdw13, req, false, rc);
			if (child == NULL) {
				return NULL;
			}
			payload_offset += child_length;
			md_offset += child_lba_count * ns->md_size;
			child_lba += child_lba_count;
			child_length = 0;
			num_sges = 0;
		}
	}

	if (child_length == req->payload_size) {
		/* No splitting was required, so setup the whole payload as one request. */
		_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
	}

	return req;
}

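/*
 * Common path for all read/write/compare/zone-append submissions: allocate the
 * parent request, then either split it (on a stripe boundary, on the max
 * transfer size, or per the transport's SGL/PRP rules) or set it up as a
 * single command.
 */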
static inline struct nvme_request *
_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
		void *accel_sequence, int *rc)
{
	struct nvme_request *req;
	uint32_t sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
	uint32_t sectors_per_max_io = _nvme_get_sectors_per_max_io(ns, io_flags);
	uint32_t sectors_per_stripe = ns->sectors_per_stripe;

	assert(rc != NULL);
	assert(*rc == 0);

	req = nvme_allocate_request(qpair, payload, lba_count * sector_size, lba_count * ns->md_size,
				    cb_fn, cb_arg);
	if (req == NULL) {
		*rc = -ENOMEM;
		return NULL;
	}

	req->payload_offset = payload_offset;
	req->md_offset = md_offset;
	req->accel_sequence = accel_sequence;

	/* Zone append commands cannot be split. */
	if (opc == SPDK_NVME_OPC_ZONE_APPEND) {
		assert(ns->csi == SPDK_NVME_CSI_ZNS);
		/*
		 * As long as we disable driver-assisted striping for Zone append commands,
		 * _nvme_ns_cmd_rw() should never cause a proper request to be split.
		 * If a request is split, after all, error handling is done in caller functions.
		 */
		sectors_per_stripe = 0;
	}

	/*
	 * Intel DC P3*00 NVMe controllers benefit from driver-assisted striping.
	 * If this controller defines a stripe boundary and this I/O spans a stripe
	 * boundary, split the request into multiple requests and submit each
	 * separately to hardware.
	 */
	if (sectors_per_stripe > 0 &&
	    (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {
		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
						  cb_fn, cb_arg, opc,
						  io_flags, req, sectors_per_stripe, sectors_per_stripe - 1,
						  apptag_mask, apptag, cdw13, accel_sequence, rc);
	} else if (lba_count > sectors_per_max_io) {
		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
						  cb_fn, cb_arg, opc,
						  io_flags, req, sectors_per_max_io, 0, apptag_mask,
						  apptag, cdw13, accel_sequence, rc);
	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL && check_sgl) {
		if (ns->ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
			return _nvme_ns_cmd_split_request_sgl(ns, qpair, payload, payload_offset, md_offset,
							      lba, lba_count, cb_fn, cb_arg, opc, io_flags,
							      req, apptag_mask, apptag, cdw13,
							      accel_sequence, rc);
		} else {
			return _nvme_ns_cmd_split_request_prp(ns, qpair, payload, payload_offset, md_offset,
							      lba, lba_count, cb_fn, cb_arg, opc, io_flags,
							      req, apptag_mask, apptag, cdw13,
							      accel_sequence, rc);
		}
	}

	_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
	return req;
}

int
spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			 uint64_t lba,
			 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			 uint32_t io_flags)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE,
			      io_flags, 0,
			      0, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				 void *buffer,
				 void *metadata,
				 uint64_t lba,
				 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
				 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE,
			      io_flags,
			      apptag_mask, apptag, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

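/*
 * The vectored (SGL payload) variants below pass check_sgl = true so that
 * _nvme_ns_cmd_rw() splits the request when the SGEs exceed the transport's
 * SGL or PRP limits.
 */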
int
spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			  uint64_t lba, uint32_t lba_count,
			  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			  spdk_nvme_req_next_sge_cb next_sge_fn)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE,
			      io_flags, 0, 0, 0, true, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				  uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, 0, true,
			      NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
		      uint64_t lba,
		      uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		      uint32_t io_flags)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags, 0,
			      0, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata,
			      uint64_t lba,
			      uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags,
			      apptag_mask, apptag, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

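/*
 * Common implementation for spdk_nvme_ns_cmd_read_ext() and
 * spdk_nvme_ns_cmd_write_ext(): a contiguous payload plus extended I/O options
 * (metadata pointer, apptag/apptag_mask, io_flags, optional accel sequence).
 */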
static int
nvme_ns_cmd_rw_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
		   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		   struct spdk_nvme_ns_cmd_ext_io_opts *opts, enum spdk_nvme_nvm_opcode opc)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	void *seq;
	int rc = 0;

	assert(opc == SPDK_NVME_OPC_READ || opc == SPDK_NVME_OPC_WRITE);
	assert(opts);

	payload = NVME_PAYLOAD_CONTIG(buffer, opts->metadata);

	if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
		return -EINVAL;
	}

	seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
	if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
		return -EINVAL;
	}

	payload.opts = opts;

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, opts->io_flags,
			      opts->apptag_mask, opts->apptag, 0, false, seq, &rc);
	if (spdk_unlikely(req == NULL)) {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			  uint64_t lba,
			  uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			  struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	return nvme_ns_cmd_rw_ext(ns, qpair, buffer, lba, lba_count, cb_fn, cb_arg, opts,
				  SPDK_NVME_OPC_READ);
}

int
spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint64_t lba, uint32_t lba_count,
		       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
		       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		       spdk_nvme_req_next_sge_cb next_sge_fn)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags, 0, 0, 0, true, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

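/*
 * Common implementation for spdk_nvme_ns_cmd_readv_ext() and
 * spdk_nvme_ns_cmd_writev_ext(): an SGL payload with optional extended I/O
 * options (opts may be NULL).
 */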
static int
nvme_ns_cmd_rwv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
		    uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		    spdk_nvme_req_next_sge_cb next_sge_fn, struct spdk_nvme_ns_cmd_ext_io_opts *opts,
		    enum spdk_nvme_nvm_opcode opc)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	void *seq;
	int rc = 0;

	assert(opc == SPDK_NVME_OPC_READ || opc == SPDK_NVME_OPC_WRITE);

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	if (opts) {
		if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
			return -EINVAL;
		}

		seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
		if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
			return -EINVAL;
		}

		payload.opts = opts;
		payload.md = opts->metadata;
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, opts->io_flags,
				      opts->apptag_mask, opts->apptag, opts->cdw13, true, seq, &rc);

	} else {
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, 0, 0, 0, 0,
				      true, NULL, &rc);
	}

	if (req == NULL) {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
			   void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	return nvme_ns_cmd_rwv_ext(ns, qpair, lba, lba_count, cb_fn, cb_arg, reset_sgl_fn, next_sge_fn,
				   opts, SPDK_NVME_OPC_READ);
}

int
spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       void *buffer, uint64_t lba,
		       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		       uint32_t io_flags)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, 0, 0, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

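/*
 * Zone append commands cannot be split, so reject them up front when the
 * controller lacks zone append support or the transfer would exceed the
 * controller's max zone append size (ZASL).
 */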
static int
nvme_ns_cmd_check_zone_append(struct spdk_nvme_ns *ns, uint32_t lba_count, uint32_t io_flags)
{
	uint32_t sector_size;

	/* Not all NVMe Zoned Namespaces support the zone append command. */
	if (!(ns->ctrlr->flags & SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED)) {
		return -EINVAL;
	}

	sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);

	/* Fail a too large zone append command early. */
	if (lba_count * sector_size > ns->ctrlr->max_zone_append_size) {
		return -EINVAL;
	}

	return 0;
}

int
nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				void *buffer, void *metadata, uint64_t zslba,
				uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
				uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
	if (rc) {
		return rc;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_ZONE_APPEND,
			      io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
	if (req != NULL) {
		/*
		 * Zone append commands cannot be split (num_children has to be 0).
		 * For NVME_PAYLOAD_TYPE_CONTIG, _nvme_ns_cmd_rw() should never cause a split
		 * to happen, since a too large request would have already been failed by
		 * nvme_ns_cmd_check_zone_append(), since zasl <= mdts.
		 */
		assert(req->num_children == 0);
		if (req->num_children) {
			nvme_request_free_children(req);
			nvme_free_request(req);
			return -EINVAL;
		}
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				 uint64_t zslba, uint32_t lba_count,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				 spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				 uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
	if (rc) {
		return rc;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_ZONE_APPEND,
			      io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
	if (req != NULL) {
		/*
		 * Zone append commands cannot be split (num_children has to be 0).
		 * For NVME_PAYLOAD_TYPE_SGL, _nvme_ns_cmd_rw() can cause a split.
		 * However, _nvme_ns_cmd_split_request_sgl() and _nvme_ns_cmd_split_request_prp()
		 * do not always cause a request to be split. These functions verify payload size,
		 * verify num sge < max_sge, and verify SGE alignment rules (in case of PRPs).
		 * If any of the verifications fail, they will split the request.
		 * In our case, a split is very unlikely, since we already verified the size using
		 * nvme_ns_cmd_check_zone_append(), however, we still need to call these functions
		 * in order to perform the verification part. If they do cause a split, we return
		 * an error here. For proper requests, these functions will never cause a split.
		 */
		if (req->num_children) {
			nvme_request_free_children(req);
			nvme_free_request(req);
			return -EINVAL;
		}
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   void *buffer, uint64_t lba,
			   uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	return nvme_ns_cmd_rw_ext(ns, qpair, buffer, lba, lba_count, cb_fn, cb_arg, opts,
				  SPDK_NVME_OPC_WRITE);
}

int
spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			uint64_t lba, uint32_t lba_count,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			spdk_nvme_req_next_sge_cb next_sge_fn)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, 0, 0, 0, true, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
			    uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	return nvme_ns_cmd_rwv_ext(ns, qpair, lba, lba_count, cb_fn, cb_arg, reset_sgl_fn, next_sge_fn,
				   opts, SPDK_NVME_OPC_WRITE);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint64_t *tmp_lba;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
		return -EINVAL;
	}

	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_WRITE_ZEROES;
	cmd->nsid = ns->id;

	tmp_lba = (uint64_t *)&cmd->cdw10;
	*tmp_lba = lba;
	cmd->cdw12 = lba_count - 1;
	cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_verify(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			uint64_t lba, uint32_t lba_count,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			uint32_t io_flags)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
		return -EINVAL;
	}

	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_VERIFY;
	cmd->nsid = ns->id;

	*(uint64_t *)&cmd->cdw10 = lba;
	cmd->cdw12 = lba_count - 1;
	cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_write_uncorrectable(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				     uint64_t lba, uint32_t lba_count,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint64_t *tmp_lba;

	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
		return -EINVAL;
	}

	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_WRITE_UNCORRECTABLE;
	cmd->nsid = ns->id;

	tmp_lba = (uint64_t *)&cmd->cdw10;
	*tmp_lba = lba;
	cmd->cdw12 = lba_count - 1;

	return nvme_qpair_submit_request(qpair, req);
}

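/*
 * The range list is copied into a driver-owned buffer by
 * nvme_allocate_request_user_copy(), so the caller's array does not need to
 * remain valid after this call returns. The NR field in CDW10 is 0-based.
 */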
int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type,
				    const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (num_ranges == 0 || num_ranges > SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES) {
		return -EINVAL;
	}

	if (ranges == NULL) {
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
					      num_ranges * sizeof(struct spdk_nvme_dsm_range),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.dsm.nr = num_ranges - 1;
	cmd->cdw11 = type;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (num_ranges == 0) {
		return -EINVAL;
	}

	if (ranges == NULL) {
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
					      num_ranges * sizeof(struct spdk_nvme_scc_source_range),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_COPY;
	cmd->nsid = ns->id;

	*(uint64_t *)&cmd->cdw10 = dest_lba;
	cmd->cdw12 = num_ranges - 1;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FLUSH;
	cmd->nsid = ns->id;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
				      struct spdk_nvme_qpair *qpair,
				      struct spdk_nvme_reservation_register_data *payload,
				      bool ignore_key,
				      enum spdk_nvme_reservation_register_action action,
				      enum spdk_nvme_reservation_register_cptpl cptpl,
				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(qpair,
					      payload, sizeof(struct spdk_nvme_reservation_register_data),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_RESERVATION_REGISTER;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.resv_register.rrega = action;
	cmd->cdw10_bits.resv_register.iekey = ignore_key;
	cmd->cdw10_bits.resv_register.cptpl = cptpl;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
				     struct spdk_nvme_qpair *qpair,
				     struct spdk_nvme_reservation_key_data *payload,
				     bool ignore_key,
				     enum spdk_nvme_reservation_release_action action,
				     enum spdk_nvme_reservation_type type,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(qpair,
					      payload, sizeof(struct spdk_nvme_reservation_key_data), cb_fn,
					      cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.resv_release.rrela = action;
	cmd->cdw10_bits.resv_release.iekey = ignore_key;
	cmd->cdw10_bits.resv_release.rtype = type;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
				     struct spdk_nvme_qpair *qpair,
				     struct spdk_nvme_reservation_acquire_data *payload,
				     bool ignore_key,
				     enum spdk_nvme_reservation_acquire_action action,
				     enum spdk_nvme_reservation_type type,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(qpair,
					      payload, sizeof(struct spdk_nvme_reservation_acquire_data),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.resv_acquire.racqa = action;
	cmd->cdw10_bits.resv_acquire.iekey = ignore_key;
	cmd->cdw10_bits.resv_acquire.rtype = type;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns,
				    struct spdk_nvme_qpair *qpair,
				    void *payload, uint32_t len,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	uint32_t num_dwords;
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (len & 0x3) {
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_RESERVATION_REPORT;
	cmd->nsid = ns->id;

	num_dwords = (len >> 2);
	cmd->cdw10 = num_dwords - 1; /* 0-based */

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_io_mgmt_recv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      void *payload, uint32_t len, uint8_t mo, uint16_t mos,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	uint32_t num_dwords;
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (len & 0x3) {
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_RECEIVE;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.mgmt_send_recv.mo = mo;
	cmd->cdw10_bits.mgmt_send_recv.mos = mos;

	num_dwords = (len >> 2);
	cmd->cdw11 = num_dwords - 1; /* 0-based */

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_io_mgmt_send(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      void *payload, uint32_t len, uint8_t mo, uint16_t mos,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	/* I/O management send transfers data from host to controller, so the
	 * user payload must be copied into the request buffer.
	 */
	req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_SEND;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.mgmt_send_recv.mo = mo;
	cmd->cdw10_bits.mgmt_send_recv.mos = mos;

	return nvme_qpair_submit_request(qpair, req);
}