/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2015 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
 */

#include "nvme_internal.h"

static inline struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns,
		struct spdk_nvme_qpair *qpair,
		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
		void *cb_arg, uint32_t opc, uint32_t io_flags,
		uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
		void *accel_sequence, int *rc);

static bool
nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
		uint32_t sectors_per_stripe, uint32_t qdepth)
{
	uint32_t child_per_io = UINT32_MAX;

	/* After a namespace is destroyed (e.g. on hotplug), all the fields associated with the
	 * namespace are cleared to zero. In that case this function returns TRUE, and
	 * -EINVAL is returned to the caller.
	 */
	if (sectors_per_stripe > 0) {
		child_per_io = (lba_count + sectors_per_stripe - 1) / sectors_per_stripe;
	} else if (sectors_per_max_io > 0) {
		child_per_io = (lba_count + sectors_per_max_io - 1) / sectors_per_max_io;
	}

	SPDK_DEBUGLOG(nvme, "checking maximum i/o length %d\n", child_per_io);

	return child_per_io >= qdepth;
}

static inline int
nvme_ns_map_failure_rc(uint32_t lba_count, uint32_t sectors_per_max_io,
		uint32_t sectors_per_stripe, uint32_t qdepth, int rc)
{
	assert(rc);
	if (rc == -ENOMEM &&
	    nvme_ns_check_request_length(lba_count, sectors_per_max_io, sectors_per_stripe, qdepth)) {
		return -EINVAL;
	}
	return rc;
}

static inline bool
_nvme_md_excluded_from_xfer(struct spdk_nvme_ns *ns, uint32_t io_flags)
{
	return (io_flags & SPDK_NVME_IO_FLAGS_PRACT) &&
	       (ns->flags & SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED) &&
	       (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) &&
	       (ns->md_size == 8);
}

static inline uint32_t
_nvme_get_host_buffer_sector_size(struct spdk_nvme_ns *ns, uint32_t io_flags)
{
	return _nvme_md_excluded_from_xfer(ns, io_flags) ?
	       ns->sector_size : ns->extended_lba_size;
}

static inline uint32_t
_nvme_get_sectors_per_max_io(struct spdk_nvme_ns *ns, uint32_t io_flags)
{
	return _nvme_md_excluded_from_xfer(ns, io_flags) ?
	       ns->sectors_per_max_io_no_md : ns->sectors_per_max_io;
}
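
/*
 * Illustrative note (not part of the driver logic): the helpers above decide how many
 * child requests a large I/O would need and whether that count could ever fit in the
 * qpair's request pool. A minimal sketch of the same arithmetic, assuming 512-byte
 * sectors, a 128 KiB maximum transfer size (256 sectors) and an unrealistically small
 * io_queue_requests value of 4:
 *
 *	uint32_t lba_count = 2048;                    // a 1 MiB I/O in 512-byte blocks
 *	uint32_t sectors_per_max_io = 256;            // 128 KiB / 512 B
 *	uint32_t children = (lba_count + sectors_per_max_io - 1) / sectors_per_max_io;  // 8
 *
 *	// nvme_ns_check_request_length() returns true because 8 >= 4, so an -ENOMEM
 *	// from request allocation is reported as -EINVAL by nvme_ns_map_failure_rc():
 *	int rc = nvme_ns_map_failure_rc(lba_count, sectors_per_max_io, 0, 4, -ENOMEM);
 *	assert(rc == -EINVAL);
 */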

static struct nvme_request *
_nvme_add_child_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		const struct nvme_payload *payload,
		uint32_t payload_offset, uint32_t md_offset,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
		struct nvme_request *parent, bool check_sgl, int *rc)
{
	struct nvme_request *child;

	child = _nvme_ns_cmd_rw(ns, qpair, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
			cb_arg, opc, io_flags, apptag_mask, apptag, cdw13, check_sgl, NULL, rc);
	if (child == NULL) {
		nvme_request_free_children(parent);
		nvme_free_request(parent);
		return NULL;
	}

	nvme_request_add_child(parent, child);
	return child;
}

static struct nvme_request *
_nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
		struct spdk_nvme_qpair *qpair,
		const struct nvme_payload *payload,
		uint32_t payload_offset, uint32_t md_offset,
		uint64_t lba, uint32_t lba_count,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
		uint32_t io_flags, struct nvme_request *req,
		uint32_t sectors_per_max_io, uint32_t sector_mask,
		uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
		void *accel_sequence, int *rc)
{
	uint32_t sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
	uint32_t remaining_lba_count = lba_count;
	struct nvme_request *child;

	if (spdk_unlikely(accel_sequence != NULL)) {
		SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
		*rc = -EINVAL;
		return NULL;
	}

	while (remaining_lba_count > 0) {
		lba_count = sectors_per_max_io - (lba & sector_mask);
		lba_count = spdk_min(remaining_lba_count, lba_count);

		child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
				lba, lba_count, cb_fn, cb_arg, opc,
				io_flags, apptag_mask, apptag, cdw13, req, true, rc);
		if (child == NULL) {
			return NULL;
		}

		remaining_lba_count -= lba_count;
		lba += lba_count;
		payload_offset += lba_count * sector_size;
		md_offset += lba_count * ns->md_size;
	}

	return req;
}

static inline bool
_is_io_flags_valid(uint32_t io_flags)
{
	if (io_flags & ~SPDK_NVME_IO_FLAGS_VALID_MASK) {
		/* Invalid io_flags */
		SPDK_ERRLOG("Invalid io_flags 0x%x\n", io_flags);
		return false;
	}

	return true;
}

static inline bool
_is_accel_sequence_valid(struct spdk_nvme_qpair *qpair, void *seq)
{
	/* An accel sequence can only be executed if the controller supports accel and the
	 * qpair is part of a poll group */
	return seq == NULL || ((qpair->ctrlr->flags & SPDK_NVME_CTRLR_ACCEL_SEQUENCE_SUPPORTED) &&
			       qpair->poll_group != NULL);
}

static void
_nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
		uint32_t opc, uint64_t lba, uint32_t lba_count,
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag,
		uint32_t cdw13)
{
	struct spdk_nvme_cmd *cmd;

	assert(_is_io_flags_valid(io_flags));

	cmd = &req->cmd;
	cmd->opc = opc;
	cmd->nsid = ns->id;

	*(uint64_t *)&cmd->cdw10 = lba;

	if (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) {
		switch (ns->pi_type) {
		case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
		case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
			cmd->cdw14 = (uint32_t)lba;
			break;
		}
	}

	cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);

	cmd->cdw12 = lba_count - 1;
	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);

	cmd->cdw13 = cdw13;

	cmd->cdw15 = apptag_mask;
	cmd->cdw15 = (cmd->cdw15 << 16 | apptag);
}
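
/*
 * Illustrative note: _nvme_ns_cmd_setup_request() packs the fields of an NVM read/write
 * style command into the submission queue entry. A minimal sketch of the resulting
 * dword layout, assuming lba = 0x1_0000_0200, lba_count = 8, apptag_mask = 0xffff and
 * apptag = 0x1234:
 *
 *	// cdw10/cdw11 hold the 64-bit starting LBA as a little-endian dword pair:
 *	//   cdw10 = 0x00000200, cdw11 = 0x00000001
 *	// cdw12 bits 15:0 hold the 0-based number of logical blocks:
 *	//   cdw12 = 8 - 1 = 7, OR'ed with any SPDK_NVME_IO_FLAGS_CDW12_MASK flags
 *	// cdw14 carries the expected initial reference tag for PI type 1/2 namespaces
 *	//   (the low 32 bits of the LBA here)
 *	// cdw15 = (apptag_mask << 16) | apptag = 0xffff1234
 */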

static struct nvme_request *
_nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
		struct spdk_nvme_qpair *qpair,
		const struct nvme_payload *payload,
		uint32_t payload_offset, uint32_t md_offset,
		uint64_t lba, uint32_t lba_count,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
		uint32_t io_flags, struct nvme_request *req,
		uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
		void *accel_sequence, int *rc)
{
	spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
	spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
	void *sgl_cb_arg = req->payload.contig_or_cb_arg;
	bool start_valid, end_valid, last_sge, child_equals_parent;
	uint64_t child_lba = lba;
	uint32_t req_current_length = 0;
	uint32_t child_length = 0;
	uint32_t sge_length;
	uint32_t page_size = qpair->ctrlr->page_size;
	uintptr_t address;

	reset_sgl_fn(sgl_cb_arg, payload_offset);
	next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
	while (req_current_length < req->payload_size) {

		if (sge_length == 0) {
			continue;
		} else if (req_current_length + sge_length > req->payload_size) {
			sge_length = req->payload_size - req_current_length;
		}

		/*
		 * The start of the SGE is invalid if the start address is not page aligned,
		 * unless it is the first SGE in the child request.
		 */
		start_valid = child_length == 0 || _is_page_aligned(address, page_size);

		/* Boolean for whether this is the last SGE in the parent request. */
		last_sge = (req_current_length + sge_length == req->payload_size);

		/*
		 * The end of the SGE is invalid if the end address is not page aligned,
		 * unless it is the last SGE in the parent request.
		 */
		end_valid = last_sge || _is_page_aligned(address + sge_length, page_size);

		/*
		 * This child request equals the parent request, meaning that no splitting
		 * was required for the parent request (the one passed into this function).
		 * In this case, we do not create a child request at all - we just send
		 * the original request as a single request at the end of this function.
		 */
		child_equals_parent = (child_length + sge_length == req->payload_size);

		if (start_valid) {
			/*
			 * The start of the SGE is valid, so advance the length parameters,
			 * to include this SGE with previous SGEs for this child request
			 * (if any). If it is not valid, we do not advance the length
			 * parameters nor get the next SGE, because we must send what has
			 * been collected before this SGE as a child request.
			 */
			child_length += sge_length;
			req_current_length += sge_length;
			if (req_current_length < req->payload_size) {
				next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
				/*
				 * If the next SGE is not page aligned, we will need to create a
				 * child request for what we have so far, and then start a new
				 * child request for the next SGE.
				 */
				start_valid = _is_page_aligned(address, page_size);
			}
		}

		if (start_valid && end_valid && !last_sge) {
			continue;
		}

		/*
		 * We need to create a split here. Send what we have accumulated so far as a child
		 * request. Checking if child_equals_parent allows us to *not* create a child request
		 * when no splitting is required - in that case we will fall-through and just create
		 * a single request with no children for the entire I/O.
		 */
		if (!child_equals_parent) {
			struct nvme_request *child;
			uint32_t child_lba_count;

			if ((child_length % ns->extended_lba_size) != 0) {
				SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
						child_length, ns->extended_lba_size);
				*rc = -EINVAL;
				return NULL;
			}
			if (spdk_unlikely(accel_sequence != NULL)) {
				SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
				*rc = -EINVAL;
				return NULL;
			}

			child_lba_count = child_length / ns->extended_lba_size;
			/*
			 * Note the last parameter is set to "false" - this tells the recursive
			 * call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
			 * since we have already verified it here.
			 */
			child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
					child_lba, child_lba_count,
					cb_fn, cb_arg, opc, io_flags,
					apptag_mask, apptag, cdw13, req, false, rc);
			if (child == NULL) {
				return NULL;
			}
			payload_offset += child_length;
			md_offset += child_lba_count * ns->md_size;
			child_lba += child_lba_count;
			child_length = 0;
		}
	}

	if (child_length == req->payload_size) {
		/* No splitting was required, so setup the whole payload as one request. */
		_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
	}

	return req;
}
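
/*
 * Illustrative note: the PRP splitting rules above only allow an SGE to sit in the
 * middle of a child request if it starts and ends on a page boundary. A minimal
 * sketch, assuming a 4 KiB controller page size, 512-byte logical blocks, and a parent
 * payload described by three SGEs:
 *
 *	//   SGE 0: addr 0x10000, len 0x1000   (page-aligned start and end)
 *	//   SGE 1: addr 0x20000, len 0x0800   (aligned start, unaligned end)
 *	//   SGE 2: addr 0x30800, len 0x0800   (unaligned start, aligned end)
 *	//
 *	// SGE 1's end is not page aligned and it is not the last SGE, so SGEs 0-1 are
 *	// sent together as one child request (0x1800 bytes, 12 blocks); SGE 2 then
 *	// becomes a second child. Each child's byte length must still be a multiple of
 *	// ns->extended_lba_size, otherwise the split fails with -EINVAL.
 */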

static struct nvme_request *
_nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
		struct spdk_nvme_qpair *qpair,
		const struct nvme_payload *payload,
		uint32_t payload_offset, uint32_t md_offset,
		uint64_t lba, uint32_t lba_count,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
		uint32_t io_flags, struct nvme_request *req,
		uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
		void *accel_sequence, int *rc)
{
	spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
	spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
	void *sgl_cb_arg = req->payload.contig_or_cb_arg;
	uint64_t child_lba = lba;
	uint32_t req_current_length = 0;
	uint32_t child_length = 0;
	uint32_t sge_length;
	uint16_t max_sges, num_sges;
	uintptr_t address;

	max_sges = ns->ctrlr->max_sges;

	reset_sgl_fn(sgl_cb_arg, payload_offset);
	num_sges = 0;

	while (req_current_length < req->payload_size) {
		next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);

		if (req_current_length + sge_length > req->payload_size) {
			sge_length = req->payload_size - req_current_length;
		}

		child_length += sge_length;
		req_current_length += sge_length;
		num_sges++;

		if (num_sges < max_sges && req_current_length < req->payload_size) {
			continue;
		}

		/*
		 * We need to create a split here. Send what we have accumulated so far as a child
		 * request. Checking if the child equals the full payload allows us to *not*
		 * create a child request when no splitting is required - in that case we will
		 * fall-through and just create a single request with no children for the entire I/O.
		 */
		if (child_length != req->payload_size) {
			struct nvme_request *child;
			uint32_t child_lba_count;

			if ((child_length % ns->extended_lba_size) != 0) {
				SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
						child_length, ns->extended_lba_size);
				*rc = -EINVAL;
				return NULL;
			}
			if (spdk_unlikely(accel_sequence != NULL)) {
				SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
				*rc = -EINVAL;
				return NULL;
			}

			child_lba_count = child_length / ns->extended_lba_size;
			/*
			 * Note the last parameter is set to "false" - this tells the recursive
			 * call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
			 * since we have already verified it here.
			 */
			child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
					child_lba, child_lba_count,
					cb_fn, cb_arg, opc, io_flags,
					apptag_mask, apptag, cdw13, req, false, rc);
			if (child == NULL) {
				return NULL;
			}
			payload_offset += child_length;
			md_offset += child_lba_count * ns->md_size;
			child_lba += child_lba_count;
			child_length = 0;
			num_sges = 0;
		}
	}

	if (child_length == req->payload_size) {
		/* No splitting was required, so setup the whole payload as one request. */
		_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
	}

	return req;
}
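
/*
 * Illustrative note: _nvme_ns_cmd_split_request_sgl() only has to respect the
 * controller's SGE limit. A minimal sketch, assuming max_sges = 2 and a payload
 * described by three 4 KiB SGEs on a 512-byte-block namespace:
 *
 *	// SGEs 0-1 fill the first child request (8 KiB, 16 blocks) once num_sges
 *	// reaches max_sges; SGE 2 becomes a second child (4 KiB, 8 blocks).
 *	// Page alignment does not matter here - unlike the PRP path - because the
 *	// controller advertises native SGL support.
 */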

static inline struct nvme_request *
_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
		void *accel_sequence, int *rc)
{
	struct nvme_request *req;
	uint32_t sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
	uint32_t sectors_per_max_io = _nvme_get_sectors_per_max_io(ns, io_flags);
	uint32_t sectors_per_stripe = ns->sectors_per_stripe;

	assert(rc != NULL);
	assert(*rc == 0);

	req = nvme_allocate_request(qpair, payload, lba_count * sector_size, lba_count * ns->md_size,
			cb_fn, cb_arg);
	if (req == NULL) {
		*rc = -ENOMEM;
		return NULL;
	}

	req->payload_offset = payload_offset;
	req->md_offset = md_offset;
	req->accel_sequence = accel_sequence;

	/* Zone append commands cannot be split. */
	if (opc == SPDK_NVME_OPC_ZONE_APPEND) {
		assert(ns->csi == SPDK_NVME_CSI_ZNS);
		/*
		 * As long as we disable driver-assisted striping for Zone append commands,
		 * _nvme_ns_cmd_rw() should never cause a proper request to be split.
		 * If a request is split after all, the error handling is done in the caller functions.
		 */
		sectors_per_stripe = 0;
	}

	/*
	 * Intel DC P3*00 NVMe controllers benefit from driver-assisted striping.
	 * If this controller defines a stripe boundary and this I/O spans a stripe
	 * boundary, split the request into multiple requests and submit each
	 * separately to hardware.
	 */
	if (sectors_per_stripe > 0 &&
	    (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {
		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset,
				lba, lba_count, cb_fn, cb_arg, opc,
				io_flags, req, sectors_per_stripe, sectors_per_stripe - 1,
				apptag_mask, apptag, cdw13, accel_sequence, rc);
	} else if (lba_count > sectors_per_max_io) {
		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset,
				lba, lba_count, cb_fn, cb_arg, opc,
				io_flags, req, sectors_per_max_io, 0, apptag_mask,
				apptag, cdw13, accel_sequence, rc);
	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL && check_sgl) {
		if (ns->ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
			return _nvme_ns_cmd_split_request_sgl(ns, qpair, payload, payload_offset, md_offset,
					lba, lba_count, cb_fn, cb_arg, opc, io_flags,
					req, apptag_mask, apptag, cdw13,
					accel_sequence, rc);
		} else {
			return _nvme_ns_cmd_split_request_prp(ns, qpair, payload, payload_offset, md_offset,
					lba, lba_count, cb_fn, cb_arg, opc, io_flags,
					req, apptag_mask, apptag, cdw13,
					accel_sequence, rc);
		}
	}

	_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
	return req;
}
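
/*
 * Illustrative note: the branches above are evaluated in a fixed order - stripe
 * boundary first, then the per-namespace maximum transfer size, then SGE-based
 * splitting for SGL payloads. A minimal sketch, assuming sectors_per_stripe = 256,
 * sectors_per_max_io = 512 and a contiguous payload:
 *
 *	// lba = 192, lba_count = 128  -> (192 & 255) + 128 = 320 > 256: the I/O crosses
 *	//   a stripe boundary and is split into two 64-sector children.
 *	// lba = 0, lba_count = 1024   -> the stripe test also fires (1024 > 256) and
 *	//   takes precedence over the max-transfer test; four 256-sector children.
 *	// lba = 0, lba_count = 256    -> fits within one stripe and one max I/O, so a
 *	//   single request is set up directly.
 */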

int
spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
		uint64_t lba,
		uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		uint32_t io_flags)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			SPDK_NVME_OPC_COMPARE,
			io_flags, 0,
			0, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}

int
spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		void *buffer,
		void *metadata,
		uint64_t lba,
		uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			SPDK_NVME_OPC_COMPARE,
			io_flags,
			apptag_mask, apptag, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}

int
spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		uint64_t lba, uint32_t lba_count,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
		spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		spdk_nvme_req_next_sge_cb next_sge_fn)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			SPDK_NVME_OPC_COMPARE,
			io_flags, 0, 0, 0, true, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		uint64_t lba, uint32_t lba_count,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
		spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
		uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, 0, true,
			NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}

int
spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
		uint64_t lba,
		uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		uint32_t io_flags)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			io_flags, 0,
			0, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
		void *metadata,
		uint64_t lba,
		uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			io_flags,
			apptag_mask, apptag, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}

int
spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		uint64_t lba, uint32_t lba_count,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
		spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		spdk_nvme_req_next_sge_cb next_sge_fn)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			io_flags, 0, 0, 0, true, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}
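
/*
 * Illustrative note: the vectored variants above describe the payload through a pair of
 * caller-provided callbacks rather than an iovec array. A minimal sketch of such
 * callbacks, assuming a hypothetical struct example_ctx holding an iovec array
 * (iovs, iovcnt) plus a current index and offset:
 *
 *	static void
 *	example_reset_sgl(void *cb_arg, uint32_t sgl_offset)
 *	{
 *		struct example_ctx *ctx = cb_arg;
 *
 *		ctx->iov_idx = 0;
 *		ctx->iov_off = sgl_offset;
 *		while (ctx->iov_idx < ctx->iovcnt &&
 *		       ctx->iov_off >= ctx->iovs[ctx->iov_idx].iov_len) {
 *			ctx->iov_off -= ctx->iovs[ctx->iov_idx++].iov_len;
 *		}
 *	}
 *
 *	static int
 *	example_next_sge(void *cb_arg, void **address, uint32_t *length)
 *	{
 *		struct example_ctx *ctx = cb_arg;
 *		struct iovec *iov = &ctx->iovs[ctx->iov_idx++];
 *
 *		*address = (uint8_t *)iov->iov_base + ctx->iov_off;
 *		*length = iov->iov_len - ctx->iov_off;
 *		ctx->iov_off = 0;
 *		return 0;
 *	}
 *
 * The ctx pointer is passed as cb_arg to spdk_nvme_ns_cmd_readv(); the driver hands the
 * same pointer back to both callbacks via the payload's contig_or_cb_arg field.
 */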

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		uint64_t lba, uint32_t lba_count,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
		spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
		uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}

int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
		void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		spdk_nvme_req_next_sge_cb next_sge_fn,
		struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	void *seq;
	int rc = 0;

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	if (opts) {
		if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
			return -EINVAL;
		}

		seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
		if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
			return -EINVAL;
		}

		payload.opts = opts;
		payload.md = opts->metadata;
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
				opts->io_flags, opts->apptag_mask, opts->apptag, opts->cdw13, true, seq, &rc);

	} else {
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
				0, 0, 0, 0, true, NULL, &rc);
	}

	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}
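
/*
 * Illustrative note: the _ext variants take their per-I/O attributes from
 * struct spdk_nvme_ns_cmd_ext_io_opts instead of separate parameters. A minimal usage
 * sketch, assuming the example_reset_sgl/example_next_sge callbacks and ctx from the
 * note above, and a caller-provided read_done completion callback:
 *
 *	struct spdk_nvme_ns_cmd_ext_io_opts opts = {
 *		.size = sizeof(opts),
 *		.io_flags = 0,
 *		.metadata = NULL,
 *	};
 *
 *	rc = spdk_nvme_ns_cmd_readv_ext(ns, qpair, lba, lba_count, read_done, &ctx,
 *			example_reset_sgl, example_next_sge, &opts);
 *
 * opts.size lets the driver detect which fields the caller's SPDK version knows about
 * (see nvme_ns_cmd_get_ext_io_opt() above); fields beyond that size are treated as absent.
 */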

int
spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		void *buffer, uint64_t lba,
		uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		uint32_t io_flags)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			io_flags, 0, 0, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}

static int
nvme_ns_cmd_check_zone_append(struct spdk_nvme_ns *ns, uint32_t lba_count, uint32_t io_flags)
{
	uint32_t sector_size;

	/* Not all NVMe Zoned Namespaces support the zone append command. */
	if (!(ns->ctrlr->flags & SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED)) {
		return -EINVAL;
	}

	sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);

	/* Fail a too large zone append command early. */
	if (lba_count * sector_size > ns->ctrlr->max_zone_append_size) {
		return -EINVAL;
	}

	return 0;
}
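
/*
 * Illustrative note: the check above enforces the controller's zone append size limit
 * (ZASL), expressed here in bytes as max_zone_append_size. A minimal sketch, assuming
 * max_zone_append_size = 128 KiB and a 4 KiB host buffer sector size:
 *
 *	// lba_count = 32  -> 32 * 4 KiB = 128 KiB <= limit, accepted
 *	// lba_count = 33  -> 33 * 4 KiB = 132 KiB >  limit, rejected early with -EINVAL
 *	//                    instead of being split (zone append must never be split)
 */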

int
nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		void *buffer, void *metadata, uint64_t zslba,
		uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
	if (rc) {
		return rc;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
			SPDK_NVME_OPC_ZONE_APPEND,
			io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
	if (req != NULL) {
		/*
		 * Zone append commands cannot be split (num_children has to be 0).
		 * For NVME_PAYLOAD_TYPE_CONTIG, _nvme_ns_cmd_rw() should never cause a split
		 * to happen, since a too large request would have already been failed by
		 * nvme_ns_cmd_check_zone_append(), since zasl <= mdts.
		 */
		assert(req->num_children == 0);
		if (req->num_children) {
			nvme_request_free_children(req);
			nvme_free_request(req);
			return -EINVAL;
		}
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}

int
nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		uint64_t zslba, uint32_t lba_count,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
		spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
		uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
	if (rc) {
		return rc;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
			SPDK_NVME_OPC_ZONE_APPEND,
			io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
	if (req != NULL) {
		/*
		 * Zone append commands cannot be split (num_children has to be 0).
		 * For NVME_PAYLOAD_TYPE_SGL, _nvme_ns_cmd_rw() can cause a split.
		 * However, _nvme_ns_cmd_split_request_sgl() and _nvme_ns_cmd_split_request_prp()
		 * do not always cause a request to be split. These functions verify payload size,
		 * verify num sge < max_sge, and verify SGE alignment rules (in case of PRPs).
		 * If any of the verifications fail, they will split the request.
		 * In our case, a split is very unlikely, since we already verified the size using
		 * nvme_ns_cmd_check_zone_append(), however, we still need to call these functions
		 * in order to perform the verification part. If they do cause a split, we return
		 * an error here. For proper requests, these functions will never cause a split.
		 */
		if (req->num_children) {
			nvme_request_free_children(req);
			nvme_free_request(req);
			return -EINVAL;
		}
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		void *buffer, void *metadata, uint64_t lba,
		uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}

int
spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		uint64_t lba, uint32_t lba_count,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
		spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		spdk_nvme_req_next_sge_cb next_sge_fn)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			io_flags, 0, 0, 0, true, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		uint64_t lba, uint32_t lba_count,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
		spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
		uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}
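
/*
 * Illustrative note: the fuse bits copied from io_flags into cmd->fuse by
 * _nvme_ns_cmd_setup_request() are what make a fused compare-and-write possible.
 * A minimal sketch, assuming the controller reports fused operation support and that
 * cmp_buf, write_buf, cmp_done, write_done and ctx are provided by the caller:
 *
 *	rc = spdk_nvme_ns_cmd_compare(ns, qpair, cmp_buf, lba, 1, cmp_done, ctx,
 *			SPDK_NVME_IO_FLAGS_FUSE_FIRST);
 *	if (rc == 0) {
 *		rc = spdk_nvme_ns_cmd_write(ns, qpair, write_buf, lba, 1, write_done, ctx,
 *				SPDK_NVME_IO_FLAGS_FUSE_SECOND);
 *	}
 *
 * The two commands must be submitted back to back on the same qpair; the write is only
 * executed if the compare succeeds.
 */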

int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
		uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		spdk_nvme_req_next_sge_cb next_sge_fn,
		struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	void *seq;
	int rc = 0;

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	if (opts) {
		if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
			return -EINVAL;
		}

		seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
		if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
			return -EINVAL;
		}

		payload.opts = opts;
		payload.md = opts->metadata;
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
				opts->io_flags, opts->apptag_mask, opts->apptag, opts->cdw13, true, seq, &rc);

	} else {
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
				0, 0, 0, 0, true, NULL, &rc);
	}

	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
				ns->sectors_per_max_io,
				ns->sectors_per_stripe,
				qpair->ctrlr->opts.io_queue_requests,
				rc);
	}
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		uint64_t lba, uint32_t lba_count,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		uint32_t io_flags)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint64_t *tmp_lba;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
		return -EINVAL;
	}

	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_WRITE_ZEROES;
	cmd->nsid = ns->id;

	tmp_lba = (uint64_t *)&cmd->cdw10;
	*tmp_lba = lba;
	cmd->cdw12 = lba_count - 1;
	cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_verify(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		uint64_t lba, uint32_t lba_count,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		uint32_t io_flags)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
		return -EINVAL;
	}

	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_VERIFY;
	cmd->nsid = ns->id;

	*(uint64_t *)&cmd->cdw10 = lba;
	cmd->cdw12 = lba_count - 1;
	cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);

	return nvme_qpair_submit_request(qpair, req);
}
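
/*
 * Illustrative note: Write Zeroes, Verify and Write Uncorrectable carry the block count
 * in a 16-bit, 0-based field, so a single command covers at most 65536 blocks and the
 * driver does not split these commands. A minimal sketch of zeroing a larger range by
 * chunking, assuming total_blocks, zero_done and ctx are provided by the caller:
 *
 *	uint64_t off = 0;
 *	int rc = 0;
 *
 *	while (off < total_blocks && rc == 0) {
 *		uint32_t chunk = spdk_min(total_blocks - off, UINT16_MAX + 1);
 *
 *		rc = spdk_nvme_ns_cmd_write_zeroes(ns, qpair, lba + off, chunk,
 *				zero_done, ctx, 0);
 *		off += chunk;
 *	}
 */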

int
spdk_nvme_ns_cmd_write_uncorrectable(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		uint64_t lba, uint32_t lba_count,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint64_t *tmp_lba;

	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
		return -EINVAL;
	}

	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_WRITE_UNCORRECTABLE;
	cmd->nsid = ns->id;

	tmp_lba = (uint64_t *)&cmd->cdw10;
	*tmp_lba = lba;
	cmd->cdw12 = lba_count - 1;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		uint32_t type,
		const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (num_ranges == 0 || num_ranges > SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES) {
		return -EINVAL;
	}

	if (ranges == NULL) {
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
			num_ranges * sizeof(struct spdk_nvme_dsm_range),
			cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.dsm.nr = num_ranges - 1;
	cmd->cdw11 = type;

	return nvme_qpair_submit_request(qpair, req);
}
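
/*
 * Illustrative note: a typical use of the DSM command is deallocation (TRIM). A minimal
 * sketch, assuming trim_done and ctx are provided by the caller:
 *
 *	struct spdk_nvme_dsm_range ranges[2] = {
 *		{ .starting_lba = 0,    .length = 1024 },
 *		{ .starting_lba = 8192, .length = 2048 },
 *	};
 *
 *	rc = spdk_nvme_ns_cmd_dataset_management(ns, qpair,
 *			SPDK_NVME_DSM_ATTR_DEALLOCATE,
 *			ranges, 2, trim_done, ctx);
 *
 * The ranges array is copied into a driver-owned DMA buffer by
 * nvme_allocate_request_user_copy(), so it may live on the caller's stack.
 */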

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		const struct spdk_nvme_scc_source_range *ranges,
		uint16_t num_ranges, uint64_t dest_lba,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (num_ranges == 0) {
		return -EINVAL;
	}

	if (ranges == NULL) {
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
			num_ranges * sizeof(struct spdk_nvme_scc_source_range),
			cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_COPY;
	cmd->nsid = ns->id;

	*(uint64_t *)&cmd->cdw10 = dest_lba;
	cmd->cdw12 = num_ranges - 1;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FLUSH;
	cmd->nsid = ns->id;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
		struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_reservation_register_data *payload,
		bool ignore_key,
		enum spdk_nvme_reservation_register_action action,
		enum spdk_nvme_reservation_register_cptpl cptpl,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(qpair,
			payload, sizeof(struct spdk_nvme_reservation_register_data),
			cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_RESERVATION_REGISTER;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.resv_register.rrega = action;
	cmd->cdw10_bits.resv_register.iekey = ignore_key;
	cmd->cdw10_bits.resv_register.cptpl = cptpl;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
		struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_reservation_key_data *payload,
		bool ignore_key,
		enum spdk_nvme_reservation_release_action action,
		enum spdk_nvme_reservation_type type,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(qpair,
			payload, sizeof(struct spdk_nvme_reservation_key_data),
			cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.resv_release.rrela = action;
	cmd->cdw10_bits.resv_release.iekey = ignore_key;
	cmd->cdw10_bits.resv_release.rtype = type;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
		struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_reservation_acquire_data *payload,
		bool ignore_key,
		enum spdk_nvme_reservation_acquire_action action,
		enum spdk_nvme_reservation_type type,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(qpair,
			payload, sizeof(struct spdk_nvme_reservation_acquire_data),
			cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.resv_acquire.racqa = action;
	cmd->cdw10_bits.resv_acquire.iekey = ignore_key;
	cmd->cdw10_bits.resv_acquire.rtype = type;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns,
		struct spdk_nvme_qpair *qpair,
		void *payload, uint32_t len,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	uint32_t num_dwords;
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (len & 0x3) {
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_RESERVATION_REPORT;
	cmd->nsid = ns->id;

	num_dwords = (len >> 2);
	cmd->cdw10 = num_dwords - 1; /* 0-based */

	return nvme_qpair_submit_request(qpair, req);
}
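
/*
 * Illustrative note: a host typically registers a key and then acquires the reservation
 * with that key. A minimal sketch, assuming resv_done and ctx are provided by the
 * caller and that each step is only issued after the previous one has completed:
 *
 *	struct spdk_nvme_reservation_register_data reg_data = { .nrkey = 0xABCD };
 *	struct spdk_nvme_reservation_acquire_data acq_data = { .crkey = 0xABCD };
 *
 *	rc = spdk_nvme_ns_cmd_reservation_register(ns, qpair, &reg_data, false,
 *			SPDK_NVME_RESERVE_REGISTER_KEY,
 *			SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
 *			resv_done, ctx);
 *	...
 *	rc = spdk_nvme_ns_cmd_reservation_acquire(ns, qpair, &acq_data, false,
 *			SPDK_NVME_RESERVE_ACQUIRE,
 *			SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
 *			resv_done, ctx);
 */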

int
spdk_nvme_ns_cmd_io_mgmt_recv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		void *payload, uint32_t len, uint8_t mo, uint16_t mos,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	uint32_t num_dwords;
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (len & 0x3) {
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_RECEIVE;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.mgmt_send_recv.mo = mo;
	cmd->cdw10_bits.mgmt_send_recv.mos = mos;

	num_dwords = (len >> 2);
	cmd->cdw11 = num_dwords - 1; /* 0-based */

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_io_mgmt_send(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		void *payload, uint32_t len, uint8_t mo, uint16_t mos,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	/* The payload of an I/O management send is transferred host-to-controller. */
	req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_SEND;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.mgmt_send_recv.mo = mo;
	cmd->cdw10_bits.mgmt_send_recv.mos = mos;

	return nvme_qpair_submit_request(qpair, req);
}
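
/*
 * Illustrative note: with Flexible Data Placement (FDP) enabled, I/O Management Receive
 * with management operation 0x1 returns the Reclaim Unit Handle Status per the NVMe
 * specification. A minimal sketch, assuming ruhs_done and ctx are provided by the
 * caller; the 0x1 management operation value is an assumption taken from the spec, not
 * from this file:
 *
 *	uint32_t len = 4096;          // dword-aligned response buffer size
 *	void *buf = calloc(1, len);   // copied back to the caller on completion because
 *	                              // the request is allocated with host_to_controller
 *	                              // set to false
 *
 *	rc = spdk_nvme_ns_cmd_io_mgmt_recv(ns, qpair, buf, len, 0x1, 0, ruhs_done, ctx);
 */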