/* SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2015 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "nvme_internal.h"

static inline struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns,
		struct spdk_nvme_qpair *qpair,
		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
		void *cb_arg, uint32_t opc, uint32_t io_flags,
		uint16_t apptag_mask, uint16_t apptag, bool check_sgl, int *rc);

static bool
nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
			     uint32_t sectors_per_stripe, uint32_t qdepth)
{
	uint32_t child_per_io = UINT32_MAX;

	/* After a namespace is destroyed (e.g. hotplug), all the fields associated with the
	 * namespace are cleared to zero. In that case this function returns true, and
	 * -EINVAL is returned to the caller.
	 */
	if (sectors_per_stripe > 0) {
		child_per_io = (lba_count + sectors_per_stripe - 1) / sectors_per_stripe;
	} else if (sectors_per_max_io > 0) {
		child_per_io = (lba_count + sectors_per_max_io - 1) / sectors_per_max_io;
	}

	SPDK_DEBUGLOG(nvme, "checking maximum i/o length %d\n", child_per_io);

	return child_per_io >= qdepth;
}

static inline int
nvme_ns_map_failure_rc(uint32_t lba_count, uint32_t sectors_per_max_io,
		       uint32_t sectors_per_stripe, uint32_t qdepth, int rc)
{
	assert(rc);
	if (rc == -ENOMEM &&
	    nvme_ns_check_request_length(lba_count, sectors_per_max_io, sectors_per_stripe, qdepth)) {
		return -EINVAL;
	}
	return rc;
}

/* When PRACT is set and the namespace is formatted with an 8-byte, PI-only metadata
 * region, the controller generates/strips the protection information itself, so no
 * metadata is transferred to or from the host buffer.
 */
static inline bool
_nvme_md_excluded_from_xfer(struct spdk_nvme_ns *ns, uint32_t io_flags)
{
	return (io_flags & SPDK_NVME_IO_FLAGS_PRACT) &&
	       (ns->flags & SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED) &&
	       (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) &&
	       (ns->md_size == 8);
}

static inline uint32_t
_nvme_get_host_buffer_sector_size(struct spdk_nvme_ns *ns, uint32_t io_flags)
{
	return _nvme_md_excluded_from_xfer(ns, io_flags) ?
	       ns->sector_size : ns->extended_lba_size;
}

static inline uint32_t
_nvme_get_sectors_per_max_io(struct spdk_nvme_ns *ns, uint32_t io_flags)
{
	return _nvme_md_excluded_from_xfer(ns, io_flags) ?
	       ns->sectors_per_max_io_no_md : ns->sectors_per_max_io;
}

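/*
 * Allocate a child request covering a slice of the parent I/O and attach it to
 * the parent. On failure, the parent and any children created so far are freed,
 * so the caller only needs to propagate the NULL return.
 */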
static struct nvme_request *
_nvme_add_child_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			const struct nvme_payload *payload,
			uint32_t payload_offset, uint32_t md_offset,
			uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag,
			struct nvme_request *parent, bool check_sgl, int *rc)
{
	struct nvme_request *child;

	child = _nvme_ns_cmd_rw(ns, qpair, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
				cb_arg, opc, io_flags, apptag_mask, apptag, check_sgl, rc);
	if (child == NULL) {
		nvme_request_free_children(parent);
		nvme_free_request(parent);
		return NULL;
	}

	nvme_request_add_child(parent, child);
	return child;
}

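/*
 * Split a request on fixed sector boundaries (either the controller's optimal
 * stripe size or the maximum transfer size). sector_mask aligns the first child
 * to the boundary; subsequent children are full-sized except for the tail.
 */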
static struct nvme_request *
_nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
			   struct spdk_nvme_qpair *qpair,
			   const struct nvme_payload *payload,
			   uint32_t payload_offset, uint32_t md_offset,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			   uint32_t io_flags, struct nvme_request *req,
			   uint32_t sectors_per_max_io, uint32_t sector_mask,
			   uint16_t apptag_mask, uint16_t apptag, int *rc)
{
	uint32_t sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
	uint32_t remaining_lba_count = lba_count;
	struct nvme_request *child;

	while (remaining_lba_count > 0) {
		lba_count = sectors_per_max_io - (lba & sector_mask);
		lba_count = spdk_min(remaining_lba_count, lba_count);

		child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
						lba, lba_count, cb_fn, cb_arg, opc,
						io_flags, apptag_mask, apptag, req, true, rc);
		if (child == NULL) {
			return NULL;
		}

		remaining_lba_count -= lba_count;
		lba += lba_count;
		payload_offset += lba_count * sector_size;
		md_offset += lba_count * ns->md_size;
	}

	return req;
}

static inline bool
_is_io_flags_valid(uint32_t io_flags)
{
	if (io_flags & ~SPDK_NVME_IO_FLAGS_VALID_MASK) {
		/* Invalid io_flags */
		SPDK_ERRLOG("Invalid io_flags 0x%x\n", io_flags);
		return false;
	}

	return true;
}

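/*
 * Fill in the common NVM I/O command fields: opcode, namespace ID, starting LBA
 * (CDW10/11), the 0-based NLB plus the CDW12 I/O flag bits, the fuse bits, and
 * the protection information reference/application tags (CDW14/CDW15).
 */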
static void
_nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
			   uint32_t opc, uint64_t lba, uint32_t lba_count,
			   uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	struct spdk_nvme_cmd *cmd;

	assert(_is_io_flags_valid(io_flags));

	cmd = &req->cmd;
	cmd->opc = opc;
	cmd->nsid = ns->id;

	*(uint64_t *)&cmd->cdw10 = lba;

	if (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) {
		switch (ns->pi_type) {
		case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
		case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
			cmd->cdw14 = (uint32_t)lba;
			break;
		}
	}

	cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);

	cmd->cdw12 = lba_count - 1;
	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);

	cmd->cdw15 = apptag_mask;
	cmd->cdw15 = (cmd->cdw15 << 16 | apptag);
}

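/*
 * Split an SGL-described payload for a controller that only supports PRP.
 * Walk the caller's SGEs and cut a child request wherever an element would
 * violate PRP alignment rules (every element except the first must start, and
 * every element except the last must end, on a page boundary).
 */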
static struct nvme_request *
_nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
			       struct spdk_nvme_qpair *qpair,
			       const struct nvme_payload *payload,
			       uint32_t payload_offset, uint32_t md_offset,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			       uint32_t io_flags, struct nvme_request *req,
			       uint16_t apptag_mask, uint16_t apptag, int *rc)
{
	spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
	spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
	void *sgl_cb_arg = req->payload.contig_or_cb_arg;
	bool start_valid, end_valid, last_sge, child_equals_parent;
	uint64_t child_lba = lba;
	uint32_t req_current_length = 0;
	uint32_t child_length = 0;
	uint32_t sge_length;
	uint32_t page_size = qpair->ctrlr->page_size;
	uintptr_t address;

	reset_sgl_fn(sgl_cb_arg, payload_offset);
	next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
	while (req_current_length < req->payload_size) {

		if (sge_length == 0) {
			continue;
		} else if (req_current_length + sge_length > req->payload_size) {
			sge_length = req->payload_size - req_current_length;
		}

		/*
		 * The start of the SGE is invalid if the start address is not page aligned,
		 * unless it is the first SGE in the child request.
		 */
		start_valid = child_length == 0 || _is_page_aligned(address, page_size);

		/* Boolean for whether this is the last SGE in the parent request. */
		last_sge = (req_current_length + sge_length == req->payload_size);

		/*
		 * The end of the SGE is invalid if the end address is not page aligned,
		 * unless it is the last SGE in the parent request.
		 */
		end_valid = last_sge || _is_page_aligned(address + sge_length, page_size);

		/*
		 * This child request equals the parent request, meaning that no splitting
		 * was required for the parent request (the one passed into this function).
		 * In this case, we do not create a child request at all - we just send
		 * the original request as a single request at the end of this function.
		 */
		child_equals_parent = (child_length + sge_length == req->payload_size);

		if (start_valid) {
			/*
			 * The start of the SGE is valid, so advance the length parameters
			 * to include this SGE with previous SGEs for this child request
			 * (if any). If it is not valid, we do not advance the length
			 * parameters nor get the next SGE, because we must send what has
			 * been collected before this SGE as a child request.
			 */
			child_length += sge_length;
			req_current_length += sge_length;
			if (req_current_length < req->payload_size) {
				next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
				/*
				 * If the next SGE is not page aligned, we will need to create a
				 * child request for what we have so far, and then start a new
				 * child request for the next SGE.
				 */
				start_valid = _is_page_aligned(address, page_size);
			}
		}

		if (start_valid && end_valid && !last_sge) {
			continue;
		}

		/*
		 * We need to create a split here. Send what we have accumulated so far as a child
		 * request. Checking if child_equals_parent allows us to *not* create a child request
		 * when no splitting is required - in that case we will fall through and just create
		 * a single request with no children for the entire I/O.
		 */
		if (!child_equals_parent) {
			struct nvme_request *child;
			uint32_t child_lba_count;

			if ((child_length % ns->extended_lba_size) != 0) {
				SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
					    child_length, ns->extended_lba_size);
				*rc = -EINVAL;
				return NULL;
			}
			child_lba_count = child_length / ns->extended_lba_size;
			/*
			 * Note the last parameter is set to "false" - this tells the recursive
			 * call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
			 * since we have already verified it here.
			 */
			child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
							child_lba, child_lba_count,
							cb_fn, cb_arg, opc, io_flags,
							apptag_mask, apptag, req, false, rc);
			if (child == NULL) {
				return NULL;
			}
			payload_offset += child_length;
			md_offset += child_lba_count * ns->md_size;
			child_lba += child_lba_count;
			child_length = 0;
		}
	}

	if (child_length == req->payload_size) {
		/* No splitting was required, so setup the whole payload as one request. */
		_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag);
	}

	return req;
}

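/*
 * Split an SGL-described payload for a controller with native SGL support.
 * Children are cut only when the number of SGEs would exceed the controller's
 * max_sges limit; the maximum transfer size was already handled by the caller.
 */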
static struct nvme_request *
_nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
			       struct spdk_nvme_qpair *qpair,
			       const struct nvme_payload *payload,
			       uint32_t payload_offset, uint32_t md_offset,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			       uint32_t io_flags, struct nvme_request *req,
			       uint16_t apptag_mask, uint16_t apptag, int *rc)
{
	spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
	spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
	void *sgl_cb_arg = req->payload.contig_or_cb_arg;
	uint64_t child_lba = lba;
	uint32_t req_current_length = 0;
	uint32_t child_length = 0;
	uint32_t sge_length;
	uint16_t max_sges, num_sges;
	uintptr_t address;

	max_sges = ns->ctrlr->max_sges;

	reset_sgl_fn(sgl_cb_arg, payload_offset);
	num_sges = 0;

	while (req_current_length < req->payload_size) {
		next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);

		if (req_current_length + sge_length > req->payload_size) {
			sge_length = req->payload_size - req_current_length;
		}

		child_length += sge_length;
		req_current_length += sge_length;
		num_sges++;

		if (num_sges < max_sges && req_current_length < req->payload_size) {
			continue;
		}

		/*
		 * We need to create a split here. Send what we have accumulated so far as a child
		 * request. Checking if the child equals the full payload allows us to *not*
		 * create a child request when no splitting is required - in that case we will
		 * fall through and just create a single request with no children for the entire I/O.
		 */
		if (child_length != req->payload_size) {
			struct nvme_request *child;
			uint32_t child_lba_count;

			if ((child_length % ns->extended_lba_size) != 0) {
				SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
					    child_length, ns->extended_lba_size);
				*rc = -EINVAL;
				return NULL;
			}
			child_lba_count = child_length / ns->extended_lba_size;
			/*
			 * Note the last parameter is set to "false" - this tells the recursive
			 * call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
			 * since we have already verified it here.
			 */
			child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
							child_lba, child_lba_count,
							cb_fn, cb_arg, opc, io_flags,
							apptag_mask, apptag, req, false, rc);
			if (child == NULL) {
				return NULL;
			}
			payload_offset += child_length;
			md_offset += child_lba_count * ns->md_size;
			child_lba += child_lba_count;
			child_length = 0;
			num_sges = 0;
		}
	}

	if (child_length == req->payload_size) {
		/* No splitting was required, so setup the whole payload as one request. */
		_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag);
	}

	return req;
}

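/*
 * Build a read/write-style request for the given LBA range. The request is
 * split into child requests as needed: on stripe boundaries (driver-assisted
 * striping), on the maximum transfer size, or on SGL/PRP limits when the
 * payload is an SGL. On error, NULL is returned and *rc is set.
 */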
static inline struct nvme_request *
_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, bool check_sgl, int *rc)
{
	struct nvme_request *req;
	uint32_t sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
	uint32_t sectors_per_max_io = _nvme_get_sectors_per_max_io(ns, io_flags);
	uint32_t sectors_per_stripe = ns->sectors_per_stripe;

	assert(rc != NULL);
	assert(*rc == 0);

	req = nvme_allocate_request(qpair, payload, lba_count * sector_size, lba_count * ns->md_size,
				    cb_fn, cb_arg);
	if (req == NULL) {
		*rc = -ENOMEM;
		return NULL;
	}

	req->payload_offset = payload_offset;
	req->md_offset = md_offset;

	/* Zone append commands cannot be split. */
	if (opc == SPDK_NVME_OPC_ZONE_APPEND) {
		assert(ns->csi == SPDK_NVME_CSI_ZNS);
		/*
		 * Since driver-assisted striping is disabled for zone append commands,
		 * _nvme_ns_cmd_rw() should never split a properly sized zone append request.
		 * If a request does end up split anyway, the callers handle the error.
		 */
		sectors_per_stripe = 0;
	}

	/*
	 * Intel DC P3*00 NVMe controllers benefit from driver-assisted striping.
	 * If this controller defines a stripe boundary and this I/O spans a stripe
	 * boundary, split the request into multiple requests and submit each
	 * separately to hardware.
	 */
	if (sectors_per_stripe > 0 &&
	    (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {

		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset,
						  lba, lba_count, cb_fn, cb_arg, opc, io_flags,
						  req, sectors_per_stripe, sectors_per_stripe - 1,
						  apptag_mask, apptag, rc);
	} else if (lba_count > sectors_per_max_io) {
		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset,
						  lba, lba_count, cb_fn, cb_arg, opc, io_flags,
						  req, sectors_per_max_io, 0,
						  apptag_mask, apptag, rc);
	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL && check_sgl) {
		if (ns->ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
			return _nvme_ns_cmd_split_request_sgl(ns, qpair, payload, payload_offset, md_offset,
							      lba, lba_count, cb_fn, cb_arg, opc, io_flags,
							      req, apptag_mask, apptag, rc);
		} else {
			return _nvme_ns_cmd_split_request_prp(ns, qpair, payload, payload_offset, md_offset,
							      lba, lba_count, cb_fn, cb_arg, opc, io_flags,
							      req, apptag_mask, apptag, rc);
		}
	}

	_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag);
	return req;
}

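/*
 * The public I/O submission wrappers below all follow the same pattern:
 * validate io_flags (and the SGL callbacks where applicable), build an
 * nvme_payload, let _nvme_ns_cmd_rw() construct the request, then either
 * submit it or translate the allocation failure via nvme_ns_map_failure_rc().
 */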
int
spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			 uint64_t lba,
			 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			 uint32_t io_flags)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE,
			      io_flags, 0, 0, false, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				 void *buffer, void *metadata,
				 uint64_t lba,
				 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
				 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE,
			      io_flags, apptag_mask, apptag, false, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			  uint64_t lba, uint32_t lba_count,
			  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			  spdk_nvme_req_next_sge_cb next_sge_fn)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE,
			      io_flags, 0, 0, true, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				  uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, true, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
		      uint64_t lba,
		      uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		      uint32_t io_flags)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags, 0, 0, false, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata,
			      uint64_t lba,
			      uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags, apptag_mask, apptag, false, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint64_t lba, uint32_t lba_count,
		       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
		       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		       spdk_nvme_req_next_sge_cb next_sge_fn)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags, 0, 0, true, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags, apptag_mask, apptag, true, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
			   void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	if (opts) {
		if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
			return -EINVAL;
		}

		payload.opts = opts;
		payload.md = opts->metadata;
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
				      opts->io_flags, opts->apptag_mask, opts->apptag, true, &rc);

	} else {
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
				      0, 0, 0, true, &rc);
	}

	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       void *buffer, uint64_t lba,
		       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		       uint32_t io_flags)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, 0, 0, false, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

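/*
 * Reject a zone append early if the controller does not support the command
 * or if the transfer would exceed the controller's maximum zone append size.
 */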
static int
nvme_ns_cmd_check_zone_append(struct spdk_nvme_ns *ns, uint32_t lba_count, uint32_t io_flags)
{
	uint32_t sector_size;

	/* Not all NVMe Zoned Namespaces support the zone append command. */
	if (!(ns->ctrlr->flags & SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED)) {
		return -EINVAL;
	}

	sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);

	/* Fail an oversized zone append command early. */
	if (lba_count * sector_size > ns->ctrlr->max_zone_append_size) {
		return -EINVAL;
	}

	return 0;
}

int
nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				void *buffer, void *metadata, uint64_t zslba,
				uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
				uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
	if (rc) {
		return rc;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_ZONE_APPEND,
			      io_flags, apptag_mask, apptag, false, &rc);
	if (req != NULL) {
		/*
		 * Zone append commands cannot be split (num_children has to be 0).
		 * For NVME_PAYLOAD_TYPE_CONTIG, _nvme_ns_cmd_rw() should never cause a split,
		 * since a too large request would already have been rejected by
		 * nvme_ns_cmd_check_zone_append() (zasl <= mdts).
		 */
		assert(req->num_children == 0);
		if (req->num_children) {
			nvme_request_free_children(req);
			nvme_free_request(req);
			return -EINVAL;
		}
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				 uint64_t zslba, uint32_t lba_count,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				 spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				 uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
	if (rc) {
		return rc;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_ZONE_APPEND,
			      io_flags, apptag_mask, apptag, true, &rc);
	if (req != NULL) {
		/*
		 * Zone append commands cannot be split (num_children has to be 0).
		 * For NVME_PAYLOAD_TYPE_SGL, _nvme_ns_cmd_rw() can cause a split.
		 * However, _nvme_ns_cmd_split_request_sgl() and _nvme_ns_cmd_split_request_prp()
		 * do not always split a request: they verify the payload size, the number of
		 * SGEs against max_sges, and the SGE alignment rules (in the PRP case), and
		 * only split when one of those checks requires it.
		 * A split is very unlikely here, since the size was already verified by
		 * nvme_ns_cmd_check_zone_append(), but we still need to call these functions
		 * to perform the verification. If they do split the request, return an error
		 * here. Properly formed requests are never split.
		 */
		if (req->num_children) {
			nvme_request_free_children(req);
			nvme_free_request(req);
			return -EINVAL;
		}
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, apptag_mask, apptag, false, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			uint64_t lba, uint32_t lba_count,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			spdk_nvme_req_next_sge_cb next_sge_fn)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, 0, 0, true, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, apptag_mask, apptag, true, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
			    uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	int rc = 0;

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	if (opts) {
		if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
			return -EINVAL;
		}

		payload.opts = opts;
		payload.md = opts->metadata;
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
				      opts->io_flags, opts->apptag_mask, opts->apptag, true, &rc);

	} else {
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
				      0, 0, 0, true, &rc);
	}

	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}
}

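/*
 * Write Zeroes, Verify and Write Uncorrectable carry no data buffer and are
 * never split, so lba_count is limited by the 16-bit, 0-based NLB field in
 * CDW12: at most UINT16_MAX + 1 blocks per command.
 */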
int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint64_t *tmp_lba;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
		return -EINVAL;
	}

	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_WRITE_ZEROES;
	cmd->nsid = ns->id;

	tmp_lba = (uint64_t *)&cmd->cdw10;
	*tmp_lba = lba;
	cmd->cdw12 = lba_count - 1;
	cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_verify(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			uint64_t lba, uint32_t lba_count,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			uint32_t io_flags)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (!_is_io_flags_valid(io_flags)) {
		return -EINVAL;
	}

	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
		return -EINVAL;
	}

	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_VERIFY;
	cmd->nsid = ns->id;

	*(uint64_t *)&cmd->cdw10 = lba;
	cmd->cdw12 = lba_count - 1;
	cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_write_uncorrectable(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				     uint64_t lba, uint32_t lba_count,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint64_t *tmp_lba;

	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
		return -EINVAL;
	}

	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_WRITE_UNCORRECTABLE;
	cmd->nsid = ns->id;

	tmp_lba = (uint64_t *)&cmd->cdw10;
	*tmp_lba = lba;
	cmd->cdw12 = lba_count - 1;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type,
				    const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (num_ranges == 0 || num_ranges > SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES) {
		return -EINVAL;
	}

	if (ranges == NULL) {
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
					      num_ranges * sizeof(struct spdk_nvme_dsm_range),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.dsm.nr = num_ranges - 1;
	cmd->cdw11 = type;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (num_ranges == 0) {
		return -EINVAL;
	}

	if (ranges == NULL) {
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
					      num_ranges * sizeof(struct spdk_nvme_scc_source_range),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_COPY;
	cmd->nsid = ns->id;

	*(uint64_t *)&cmd->cdw10 = dest_lba;
	cmd->cdw12 = num_ranges - 1;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FLUSH;
	cmd->nsid = ns->id;

	return nvme_qpair_submit_request(qpair, req);
}

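/*
 * Reservation commands: the caller's data structure is copied into a
 * request-owned buffer via nvme_allocate_request_user_copy() (host to
 * controller for register/release/acquire, controller to host for report).
 */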
int
spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
				      struct spdk_nvme_qpair *qpair,
				      struct spdk_nvme_reservation_register_data *payload,
				      bool ignore_key,
				      enum spdk_nvme_reservation_register_action action,
				      enum spdk_nvme_reservation_register_cptpl cptpl,
				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(qpair,
					      payload, sizeof(struct spdk_nvme_reservation_register_data),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_RESERVATION_REGISTER;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.resv_register.rrega = action;
	cmd->cdw10_bits.resv_register.iekey = ignore_key;
	cmd->cdw10_bits.resv_register.cptpl = cptpl;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
				     struct spdk_nvme_qpair *qpair,
				     struct spdk_nvme_reservation_key_data *payload,
				     bool ignore_key,
				     enum spdk_nvme_reservation_release_action action,
				     enum spdk_nvme_reservation_type type,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(qpair,
					      payload, sizeof(struct spdk_nvme_reservation_key_data),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.resv_release.rrela = action;
	cmd->cdw10_bits.resv_release.iekey = ignore_key;
	cmd->cdw10_bits.resv_release.rtype = type;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
				     struct spdk_nvme_qpair *qpair,
				     struct spdk_nvme_reservation_acquire_data *payload,
				     bool ignore_key,
				     enum spdk_nvme_reservation_acquire_action action,
				     enum spdk_nvme_reservation_type type,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(qpair,
					      payload, sizeof(struct spdk_nvme_reservation_acquire_data),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;
	cmd->nsid = ns->id;

	cmd->cdw10_bits.resv_acquire.racqa = action;
	cmd->cdw10_bits.resv_acquire.iekey = ignore_key;
	cmd->cdw10_bits.resv_acquire.rtype = type;

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns,
				    struct spdk_nvme_qpair *qpair,
				    void *payload, uint32_t len,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	uint32_t num_dwords;
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (len % 4) {
		return -EINVAL;
	}
	num_dwords = len / 4;

	req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_RESERVATION_REPORT;
	cmd->nsid = ns->id;

	cmd->cdw10 = num_dwords;

	return nvme_qpair_submit_request(qpair, req);
}