/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/likely.h"
#include "spdk/nvme.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/nvmf_spec.h"
#include "spdk/trace.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"

#include "spdk/log.h"

static bool
nvmf_subsystem_bdev_io_type_supported(struct spdk_nvmf_subsystem *subsystem,
				      enum spdk_bdev_io_type io_type)
{
	struct spdk_nvmf_ns *ns;

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		if (ns->bdev == NULL) {
			continue;
		}

		if (!spdk_bdev_io_type_supported(ns->bdev, io_type)) {
			SPDK_DEBUGLOG(nvmf,
				      "Subsystem %s namespace %u (%s) does not support io_type %d\n",
				      spdk_nvmf_subsystem_get_nqn(subsystem),
				      ns->opts.nsid, spdk_bdev_get_name(ns->bdev), (int)io_type);
			return false;
		}
	}

	SPDK_DEBUGLOG(nvmf, "All devices in Subsystem %s support io_type %d\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem), (int)io_type);
	return true;
}

bool
nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_UNMAP);
}

bool
nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
}

bool
nvmf_ctrlr_copy_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_COPY);
}

static void
nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
			     void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int first_sc = 0, first_sct = 0, sc = 0, sct = 0;
	uint32_t cdw0 = 0;
	struct spdk_nvmf_request *first_req = req->first_fused_req;

	if (spdk_unlikely(first_req != NULL)) {
		/* fused commands - get status for both operations */
		struct spdk_nvme_cpl *first_response = &first_req->rsp->nvme_cpl;

		spdk_bdev_io_get_nvme_fused_status(bdev_io, &cdw0, &first_sct, &first_sc, &sct, &sc);
		first_response->cdw0 = cdw0;
		first_response->status.sc = first_sc;
		first_response->status.sct = first_sct;

		/* first request should be completed */
		spdk_nvmf_request_complete(first_req);
		req->first_fused_req = NULL;
	} else {
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
	}

	response->cdw0 = cdw0;
	response->status.sc = sc;
	response->status.sct = sct;

	spdk_nvmf_request_complete(req);
	spdk_bdev_free_io(bdev_io);
}

static void
nvmf_bdev_ctrlr_complete_admin_cmd(struct spdk_bdev_io *bdev_io, bool success,
				   void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;

	if (req->cmd_cb_fn) {
		req->cmd_cb_fn(req);
	}

	nvmf_bdev_ctrlr_complete_cmd(bdev_io, success, req);
}
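
/*
 * Fill an Identify Namespace data structure from the properties of the
 * namespace's bdev: capacity (NSZE/NCAP/NUSE), LBA format and metadata
 * settings, atomic write and optimal I/O hints, reservation capabilities,
 * NGUID/EUI64, and Simple Copy limits. When DIF insert/strip is enabled,
 * metadata is hidden from the host and the data block size is reported
 * instead.
 */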
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	struct spdk_bdev *bdev = ns->bdev;
	uint64_t num_blocks;
	uint32_t phys_blocklen;
	uint32_t max_copy;

	num_blocks = spdk_bdev_get_num_blocks(bdev);

	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->nacwu = spdk_bdev_get_acwu(bdev) - 1; /* nacwu is 0-based */
	if (!dif_insert_or_strip) {
		nsdata->lbaf[0].ms = spdk_bdev_get_md_size(bdev);
		nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_block_size(bdev));
		if (nsdata->lbaf[0].ms != 0) {
			nsdata->flbas.extended = 1;
			nsdata->mc.extended = 1;
			nsdata->mc.pointer = 0;
			nsdata->dps.md_start = spdk_bdev_is_dif_head_of_md(bdev);
			/* The NVMe-oF library doesn't process the PRACT and PRCHK flags,
			 * so we leave the use of the extended LBA buffer to users.
			 */
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_DISABLE;
		}
	} else {
		nsdata->lbaf[0].ms = 0;
		nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_data_block_size(bdev));
	}

	phys_blocklen = spdk_bdev_get_physical_block_size(bdev);
	assert(phys_blocklen > 0);
	/* Linux driver uses min(nawupf, npwg) to set physical_block_size */
	nsdata->nsfeat.optperf = 1;
	nsdata->nsfeat.ns_atomic_write_unit = 1;
	nsdata->npwg = (phys_blocklen >> nsdata->lbaf[0].lbads) - 1;
	nsdata->nawupf = nsdata->npwg;
	nsdata->npwa = nsdata->npwg;
	nsdata->npdg = nsdata->npwg;
	nsdata->npda = nsdata->npwg;

	if (spdk_bdev_get_write_unit_size(bdev) == 1) {
		nsdata->noiob = spdk_bdev_get_optimal_io_boundary(bdev);
	}
	nsdata->nmic.can_share = 1;
	if (ns->ptpl_file != NULL) {
		nsdata->nsrescap.rescap.persist = 1;
	}
	nsdata->nsrescap.rescap.write_exclusive = 1;
	nsdata->nsrescap.rescap.exclusive_access = 1;
	nsdata->nsrescap.rescap.write_exclusive_reg_only = 1;
	nsdata->nsrescap.rescap.exclusive_access_reg_only = 1;
	nsdata->nsrescap.rescap.write_exclusive_all_reg = 1;
	nsdata->nsrescap.rescap.exclusive_access_all_reg = 1;
	nsdata->nsrescap.rescap.ignore_existing_key = 1;

	SPDK_STATIC_ASSERT(sizeof(nsdata->nguid) == sizeof(ns->opts.nguid), "size mismatch");
	memcpy(nsdata->nguid, ns->opts.nguid, sizeof(nsdata->nguid));

	SPDK_STATIC_ASSERT(sizeof(nsdata->eui64) == sizeof(ns->opts.eui64), "size mismatch");
	memcpy(&nsdata->eui64, ns->opts.eui64, sizeof(nsdata->eui64));

	/* For now we support just one source range for copy command */
	nsdata->msrc = 0;

	max_copy = spdk_bdev_get_max_copy(bdev);
	if (max_copy == 0 || max_copy > UINT16_MAX) {
		/* Zero means copy size is unlimited */
		nsdata->mcl = UINT16_MAX;
		nsdata->mssrl = UINT16_MAX;
	} else {
		nsdata->mcl = max_copy;
		nsdata->mssrl = max_copy;
	}
}

static void
nvmf_bdev_ctrlr_get_rw_params(const struct spdk_nvme_cmd *cmd, uint64_t *start_lba,
			      uint64_t *num_blocks)
{
	/* SLBA: CDW10 and CDW11 */
	*start_lba = from_le64(&cmd->cdw10);

	/* NLB: CDW12 bits 15:00, 0's based */
	*num_blocks = (from_le32(&cmd->cdw12) & 0xFFFFu) + 1;
}

static bool
nvmf_bdev_ctrlr_lba_in_range(uint64_t bdev_num_blocks, uint64_t io_start_lba,
			     uint64_t io_num_blocks)
{
	if (io_start_lba + io_num_blocks > bdev_num_blocks ||
	    io_start_lba + io_num_blocks < io_start_lba) {
		return false;
	}

	return true;
}
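
/*
 * When a bdev submission routine returns -ENOMEM, the request is parked on the
 * bdev's io_wait queue via nvmf_bdev_ctrl_queue_io() and resubmitted through
 * one of the resubmit callbacks below once a bdev I/O becomes available. If the
 * resubmitted command then completes synchronously, the response is sent from
 * the callback.
 */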
static void
nvmf_ctrlr_process_io_cmd_resubmit(void *arg)
{
	struct spdk_nvmf_request *req = arg;
	int rc;

	rc = nvmf_ctrlr_process_io_cmd(req);
	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		spdk_nvmf_request_complete(req);
	}
}

static void
nvmf_ctrlr_process_admin_cmd_resubmit(void *arg)
{
	struct spdk_nvmf_request *req = arg;
	int rc;

	rc = nvmf_ctrlr_process_admin_cmd(req);
	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		spdk_nvmf_request_complete(req);
	}
}

static void
nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn, void *cb_arg)
{
	int rc;

	req->bdev_io_wait.bdev = bdev;
	req->bdev_io_wait.cb_fn = cb_fn;
	req->bdev_io_wait.cb_arg = cb_arg;

	rc = spdk_bdev_queue_io_wait(bdev, ch, &req->bdev_io_wait);
	if (rc != 0) {
		assert(false);
	}
	req->qpair->group->stat.pending_bdev_io++;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY);
}

int
nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	assert(!spdk_nvmf_request_using_zcopy(req));

	rc = spdk_bdev_readv_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			  struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	assert(!spdk_nvmf_request_using_zcopy(req));

	rc = spdk_bdev_writev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				     nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
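
/*
 * Compare and fused Compare-and-Write. For the fused pair, both commands must
 * address the same LBA range; the write request carries the bdev completion
 * callback, and the compare request is completed from
 * nvmf_bdev_ctrlr_complete_cmd() through req->first_fused_req.
 */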
int
nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Compare NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_comparev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				       nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req,
				      struct spdk_nvmf_request *write_req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmp_cmd = &cmp_req->cmd->nvme_cmd;
	struct spdk_nvme_cmd *write_cmd = &write_req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &write_req->rsp->nvme_cpl;
	uint64_t write_start_lba, cmp_start_lba;
	uint64_t write_num_blocks, cmp_num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmp_cmd, &cmp_start_lba, &cmp_num_blocks);
	nvmf_bdev_ctrlr_get_rw_params(write_cmd, &write_start_lba, &write_num_blocks);

	if (spdk_unlikely(write_start_lba != cmp_start_lba || write_num_blocks != cmp_num_blocks)) {
		SPDK_ERRLOG("Fused command start lba / num blocks mismatch\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, write_start_lba,
			  write_num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(write_num_blocks * block_size > write_req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    write_num_blocks, block_size, write_req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_comparev_and_writev_blocks(desc, ch, cmp_req->iov, cmp_req->iovcnt, write_req->iov,
			write_req->iovcnt, write_start_lba, write_num_blocks,
			nvmf_bdev_ctrlr_complete_cmd, write_req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(cmp_req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, cmp_req);
			nvmf_bdev_ctrl_queue_io(write_req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, write_req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
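
/*
 * Write Zeroes carries no data buffer, so only the LBA range is validated
 * before the range is handed to the bdev layer.
 */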
int
nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ch, start_lba, num_blocks,
					   nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			  struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int rc;

	/* As an NVMe-oF controller, SPDK always sets the volatile write cache
	 * bit to 1, so return success for block devices that don't support the
	 * FLUSH command.
	 */
	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_flush_blocks(desc, ch, 0, spdk_bdev_get_num_blocks(bdev),
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
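
/*
 * Dataset Management (deallocate) context. Each DSM range is submitted as a
 * separate bdev unmap; "count" tracks outstanding unmaps and the request is
 * completed once it drops back to zero. "range_index" records how far
 * submission got so that a resubmit after -ENOMEM continues with the
 * remaining ranges.
 */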
struct nvmf_bdev_ctrlr_unmap {
	struct spdk_nvmf_request *req;
	uint32_t count;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev *bdev;
	struct spdk_io_channel *ch;
	uint32_t range_index;
};

static void
nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success,
			  void *cb_arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = cb_arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int sc, sct;
	uint32_t cdw0;

	unmap_ctx->count--;

	if (response->status.sct == SPDK_NVME_SCT_GENERIC &&
	    response->status.sc == SPDK_NVME_SC_SUCCESS) {
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
		response->cdw0 = cdw0;
		response->status.sc = sc;
		response->status.sct = sct;
	}

	if (unmap_ctx->count == 0) {
		spdk_nvmf_request_complete(req);
		free(unmap_ctx);
	}
	spdk_bdev_free_io(bdev_io);
}

static int nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
				 struct nvmf_bdev_ctrlr_unmap *unmap_ctx);

static void
nvmf_bdev_ctrlr_unmap_resubmit(void *arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_bdev_desc *desc = unmap_ctx->desc;
	struct spdk_bdev *bdev = unmap_ctx->bdev;
	struct spdk_io_channel *ch = unmap_ctx->ch;

	nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, unmap_ctx);
}

static int
nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx)
{
	uint16_t nr, i;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_iov_xfer ix;
	uint64_t lba;
	uint32_t lba_count;
	int rc;

	nr = cmd->cdw10_bits.dsm.nr + 1;
	if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) {
		SPDK_ERRLOG("Dataset Management number of ranges > SGL length\n");
		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (unmap_ctx == NULL) {
		unmap_ctx = calloc(1, sizeof(*unmap_ctx));
		if (!unmap_ctx) {
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		unmap_ctx->req = req;
		unmap_ctx->desc = desc;
		unmap_ctx->ch = ch;
		unmap_ctx->bdev = bdev;

		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
	} else {
		unmap_ctx->count--;	/* dequeued */
	}

	spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);

	for (i = unmap_ctx->range_index; i < nr; i++) {
		struct spdk_nvme_dsm_range dsm_range = { 0 };

		spdk_iov_xfer_to_buf(&ix, &dsm_range, sizeof(dsm_range));

		lba = dsm_range.starting_lba;
		lba_count = dsm_range.length;

		unmap_ctx->count++;

		rc = spdk_bdev_unmap_blocks(desc, ch, lba, lba_count,
					    nvmf_bdev_ctrlr_unmap_cpl, unmap_ctx);
		if (rc) {
			if (rc == -ENOMEM) {
				nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx);
				/* Unmap was not yet submitted to bdev */
				/* unmap_ctx->count will be decremented when the request is dequeued */
				return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
			}
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			unmap_ctx->count--;
			/* We can't return here - we may have to wait for any other
			 * unmaps already sent to complete */
			break;
		}
		unmap_ctx->range_index++;
	}

	if (unmap_ctx->count == 0) {
		free(unmap_ctx);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
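
/*
 * Dataset Management: only the Attribute - Deallocate (AD) bit is acted upon;
 * all other attributes are treated as hints and the command succeeds without
 * touching the bdev.
 */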
int
nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	if (cmd->cdw11_bits.dsm.ad) {
		return nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, NULL);
	}

	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

int
nvmf_bdev_ctrlr_copy_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	uint64_t sdlba = ((uint64_t)cmd->cdw11 << 32) + cmd->cdw10;
	struct spdk_nvme_scc_source_range range = { 0 };
	struct spdk_iov_xfer ix;
	int rc;

	SPDK_DEBUGLOG(nvmf, "Copy command: SDLBA %lu, NR %u, desc format %u, PRINFOR %u, "
		      "DTYPE %u, STCW %u, PRINFOW %u, FUA %u, LR %u\n",
		      sdlba,
		      cmd->cdw12_bits.copy.nr,
		      cmd->cdw12_bits.copy.df,
		      cmd->cdw12_bits.copy.prinfor,
		      cmd->cdw12_bits.copy.dtype,
		      cmd->cdw12_bits.copy.stcw,
		      cmd->cdw12_bits.copy.prinfow,
		      cmd->cdw12_bits.copy.fua,
		      cmd->cdw12_bits.copy.lr);

	if (spdk_unlikely(req->length != (cmd->cdw12_bits.copy.nr + 1) *
			  sizeof(struct spdk_nvme_scc_source_range))) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/*
	 * We support only one source range, and rely on this with the xfer
	 * below.
	 */
	if (cmd->cdw12_bits.copy.nr > 0) {
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVME_SC_CMD_SIZE_LIMIT_SIZE_EXCEEDED;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (cmd->cdw12_bits.copy.df != 0) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
	spdk_iov_xfer_to_buf(&ix, &range, sizeof(range));

	rc = spdk_bdev_copy_blocks(desc, ch, sdlba, range.slba, range.nlb + 1,
				   nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}

		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
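
/*
 * NVMe passthrough paths hand the raw command to the bdev. The bdev NVMe
 * passthru API takes a single contiguous buffer, so requests that arrived in
 * more than one iovec are rejected here.
 * spdk_nvmf_bdev_ctrlr_nvme_passthru_admin() additionally records cb_fn in
 * req->cmd_cb_fn; it is invoked (when non-NULL) from
 * nvmf_bdev_ctrlr_complete_admin_cmd() before the completion is sent.
 */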
int
nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	int rc;

	if (spdk_unlikely(req->iovcnt > 1)) {
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		req->rsp->nvme_cpl.status.dnr = 1;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_nvme_io_passthru(desc, ch, &req->cmd->nvme_cmd, req->iov[0].iov_base, req->length,
					nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		req->rsp->nvme_cpl.status.dnr = 1;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		spdk_nvmf_nvme_passthru_cmd_cb cb_fn)
{
	int rc;

	if (spdk_unlikely(req->iovcnt > 1)) {
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		req->rsp->nvme_cpl.status.dnr = 1;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	req->cmd_cb_fn = cb_fn;

	rc = spdk_bdev_nvme_admin_passthru(desc, ch, &req->cmd->nvme_cmd, req->iov[0].iov_base, req->length,
					   nvmf_bdev_ctrlr_complete_admin_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		if (rc == -ENOTSUP) {
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		} else {
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		}

		req->rsp->nvme_cpl.status.dnr = 1;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
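
/*
 * Abort handling: the caller sets bit 0 of CDW0 ("command not aborted") before
 * calling spdk_nvmf_bdev_ctrlr_abort_cmd(); the bit is cleared in the
 * completion callback only if the bdev layer reports that the abort succeeded.
 */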
static void
nvmf_bdev_ctrlr_complete_abort_cmd(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;

	if (success) {
		req->rsp->nvme_cpl.cdw0 &= ~1U;
	}

	spdk_nvmf_request_complete(req);
	spdk_bdev_free_io(bdev_io);
}

int
spdk_nvmf_bdev_ctrlr_abort_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			       struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
			       struct spdk_nvmf_request *req_to_abort)
{
	int rc;

	assert((req->rsp->nvme_cpl.cdw0 & 1U) != 0);

	rc = spdk_bdev_abort(desc, ch, req_to_abort, nvmf_bdev_ctrlr_complete_abort_cmd, req);
	if (spdk_likely(rc == 0)) {
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	} else if (rc == -ENOMEM) {
		nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	} else {
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}
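
/*
 * Build a DIF context for a namespace whose bdev carries protection
 * information. The initial reference tag is taken from the command's starting
 * LBA (lower 32 bits), and the reference-tag/guard checks mirror the checks
 * enabled on the bdev. Returns false for bdevs without metadata.
 */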
bool
nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
			    struct spdk_dif_ctx *dif_ctx)
{
	uint32_t init_ref_tag, dif_check_flags = 0;
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;

	if (spdk_bdev_get_md_size(bdev) == 0) {
		return false;
	}

	/* Initial Reference Tag is the lower 32 bits of the start LBA. */
	init_ref_tag = (uint32_t)from_le64(&cmd->cdw10);

	if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_REFTAG)) {
		dif_check_flags |= SPDK_DIF_FLAGS_REFTAG_CHECK;
	}

	if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_GUARD)) {
		dif_check_flags |= SPDK_DIF_FLAGS_GUARD_CHECK;
	}

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(dif_ctx,
			       spdk_bdev_get_block_size(bdev),
			       spdk_bdev_get_md_size(bdev),
			       spdk_bdev_is_md_interleaved(bdev),
			       spdk_bdev_is_dif_head_of_md(bdev),
			       spdk_bdev_get_dif_type(bdev),
			       dif_check_flags,
			       init_ref_tag, 0, 0, 0, 0, &dif_opts);

	return (rc == 0) ? true : false;
}

static void
nvmf_bdev_ctrlr_zcopy_start_complete(struct spdk_bdev_io *bdev_io, bool success,
				     void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;
	struct iovec *iov;
	int iovcnt = 0;

	if (spdk_unlikely(!success)) {
		int sc = 0, sct = 0;
		uint32_t cdw0 = 0;
		struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);

		response->cdw0 = cdw0;
		response->status.sc = sc;
		response->status.sct = sct;

		spdk_bdev_free_io(bdev_io);
		spdk_nvmf_request_complete(req);
		return;
	}

	spdk_bdev_io_get_iovec(bdev_io, &iov, &iovcnt);

	assert(iovcnt <= NVMF_REQ_MAX_BUFFERS);
	assert(iovcnt > 0);

	req->iovcnt = iovcnt;

	assert(req->iov == iov);

	/* backward compatible */
	req->data = req->iov[0].iov_base;

	req->zcopy_bdev_io = bdev_io; /* Preserve the bdev_io for the end zcopy */

	spdk_nvmf_request_complete(req);
	/* Don't free the bdev_io here as it is needed for the END ZCOPY */
}
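
/*
 * Zero-copy flow: nvmf_bdev_ctrlr_zcopy_start() asks the bdev to provide (and,
 * for reads, populate) buffers for the LBA range; the resulting bdev_io is
 * kept in req->zcopy_bdev_io until nvmf_bdev_ctrlr_zcopy_end() commits (for
 * writes) or releases it.
 */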
int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(&req->cmd->nvme_cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	bool populate = (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) ? true : false;

	rc = spdk_bdev_zcopy_start(desc, ch, req->iov, req->iovcnt, start_lba,
				   num_blocks, populate, nvmf_bdev_ctrlr_zcopy_start_complete, req);
	if (spdk_unlikely(rc != 0)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

static void
nvmf_bdev_ctrlr_zcopy_end_complete(struct spdk_bdev_io *bdev_io, bool success,
				   void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;

	if (spdk_unlikely(!success)) {
		int sc = 0, sct = 0;
		uint32_t cdw0 = 0;
		struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);

		response->cdw0 = cdw0;
		response->status.sc = sc;
		response->status.sct = sct;
	}

	spdk_bdev_free_io(bdev_io);
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
	int rc __attribute__((unused));

	rc = spdk_bdev_zcopy_end(req->zcopy_bdev_io, commit, nvmf_bdev_ctrlr_zcopy_end_complete, req);

	/* The only way spdk_bdev_zcopy_end() can fail is if we pass a bdev_io type that isn't ZCOPY */
	assert(rc == 0);
}