/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/likely.h"
#include "spdk/nvme.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/nvmf_spec.h"
#include "spdk/trace.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"

#include "spdk/log.h"

static bool
nvmf_subsystem_bdev_io_type_supported(struct spdk_nvmf_subsystem *subsystem,
				      enum spdk_bdev_io_type io_type)
{
	struct spdk_nvmf_ns *ns;

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		if (ns->bdev == NULL) {
			continue;
		}

		if (!spdk_bdev_io_type_supported(ns->bdev, io_type)) {
			SPDK_DEBUGLOG(nvmf,
				      "Subsystem %s namespace %u (%s) does not support io_type %d\n",
				      spdk_nvmf_subsystem_get_nqn(subsystem),
				      ns->opts.nsid, spdk_bdev_get_name(ns->bdev), (int)io_type);
			return false;
		}
	}

	SPDK_DEBUGLOG(nvmf, "All devices in Subsystem %s support io_type %d\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem), (int)io_type);
	return true;
}

bool
nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_UNMAP);
}

bool
nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
}

bool
nvmf_ctrlr_copy_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_COPY);
}

static void
nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
			     void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int first_sc = 0, first_sct = 0, sc = 0, sct = 0;
	uint32_t cdw0 = 0;
	struct spdk_nvmf_request *first_req = req->first_fused_req;

	if (spdk_unlikely(first_req != NULL)) {
		/* fused commands - get status for both operations */
		struct spdk_nvme_cpl *first_response = &first_req->rsp->nvme_cpl;

		spdk_bdev_io_get_nvme_fused_status(bdev_io, &cdw0, &first_sct, &first_sc, &sct, &sc);
		first_response->cdw0 = cdw0;
		first_response->status.sc = first_sc;
		first_response->status.sct = first_sct;

		/* first request should be completed */
		spdk_nvmf_request_complete(first_req);
		req->first_fused_req = NULL;
	} else {
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
	}

	response->cdw0 = cdw0;
	response->status.sc = sc;
	response->status.sct = sct;

	spdk_nvmf_request_complete(req);
	spdk_bdev_free_io(bdev_io);
}

static void
nvmf_bdev_ctrlr_complete_admin_cmd(struct spdk_bdev_io *bdev_io, bool success,
				   void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;

	if (req->cmd_cb_fn) {
		req->cmd_cb_fn(req);
	}

	nvmf_bdev_ctrlr_complete_cmd(bdev_io, success, req);
}

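/* Fill the Identify Namespace data structure from the properties of the
 * namespace's underlying bdev.
 */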
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	struct spdk_bdev *bdev = ns->bdev;
	uint64_t num_blocks;
	uint32_t phys_blocklen;
	uint32_t max_copy;

	num_blocks = spdk_bdev_get_num_blocks(bdev);

	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->nacwu = spdk_bdev_get_acwu(bdev) - 1; /* nacwu is 0-based */
	if (!dif_insert_or_strip) {
		nsdata->lbaf[0].ms = spdk_bdev_get_md_size(bdev);
		nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_block_size(bdev));
		if (nsdata->lbaf[0].ms != 0) {
			nsdata->flbas.extended = 1;
			nsdata->mc.extended = 1;
			nsdata->mc.pointer = 0;
			nsdata->dps.md_start = spdk_bdev_is_dif_head_of_md(bdev);
			/* The NVMf library doesn't process the PRACT and PRCHK flags, so we
			 * leave the use of extended LBA buffers to users.
			 */
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_DISABLE;
		}
	} else {
		nsdata->lbaf[0].ms = 0;
		nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_data_block_size(bdev));
	}

	phys_blocklen = spdk_bdev_get_physical_block_size(bdev);
	assert(phys_blocklen > 0);
	/* Linux driver uses min(nawupf, npwg) to set physical_block_size */
	nsdata->nsfeat.optperf = 1;
	nsdata->nsfeat.ns_atomic_write_unit = 1;
	nsdata->npwg = (phys_blocklen >> nsdata->lbaf[0].lbads) - 1;
	nsdata->nawupf = nsdata->npwg;
	nsdata->npwa = nsdata->npwg;
	nsdata->npdg = nsdata->npwg;
	nsdata->npda = nsdata->npwg;

	if (spdk_bdev_get_write_unit_size(bdev) == 1) {
		nsdata->noiob = spdk_bdev_get_optimal_io_boundary(bdev);
	}
	nsdata->nmic.can_share = 1;
	if (ns->ptpl_file != NULL) {
		nsdata->nsrescap.rescap.persist = 1;
	}
	nsdata->nsrescap.rescap.write_exclusive = 1;
	nsdata->nsrescap.rescap.exclusive_access = 1;
	nsdata->nsrescap.rescap.write_exclusive_reg_only = 1;
	nsdata->nsrescap.rescap.exclusive_access_reg_only = 1;
	nsdata->nsrescap.rescap.write_exclusive_all_reg = 1;
	nsdata->nsrescap.rescap.exclusive_access_all_reg = 1;
	nsdata->nsrescap.rescap.ignore_existing_key = 1;

	SPDK_STATIC_ASSERT(sizeof(nsdata->nguid) == sizeof(ns->opts.nguid), "size mismatch");
	memcpy(nsdata->nguid, ns->opts.nguid, sizeof(nsdata->nguid));

	SPDK_STATIC_ASSERT(sizeof(nsdata->eui64) == sizeof(ns->opts.eui64), "size mismatch");
	memcpy(&nsdata->eui64, ns->opts.eui64, sizeof(nsdata->eui64));

	/* For now we support just one source range for copy command */
	nsdata->msrc = 0;

	max_copy = spdk_bdev_get_max_copy(bdev);
	if (max_copy == 0 || max_copy > UINT16_MAX) {
		/* Zero means copy size is unlimited */
		nsdata->mcl = UINT16_MAX;
		nsdata->mssrl = UINT16_MAX;
	} else {
		nsdata->mcl = max_copy;
		nsdata->mssrl = max_copy;
	}
}

static void
nvmf_bdev_ctrlr_get_rw_params(const struct spdk_nvme_cmd *cmd, uint64_t *start_lba,
			      uint64_t *num_blocks)
{
	/* SLBA: CDW10 and CDW11 */
	*start_lba = from_le64(&cmd->cdw10);

	/* NLB: CDW12 bits 15:00, 0's based */
	*num_blocks = (from_le32(&cmd->cdw12) & 0xFFFFu) + 1;
}

static bool
nvmf_bdev_ctrlr_lba_in_range(uint64_t bdev_num_blocks, uint64_t io_start_lba,
			     uint64_t io_num_blocks)
{
	if (io_start_lba + io_num_blocks > bdev_num_blocks ||
	    io_start_lba + io_num_blocks < io_start_lba) {
		return false;
	}

	return true;
}

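/* Resubmit callbacks registered through nvmf_bdev_ctrl_queue_io(). The bdev
 * layer invokes them once resources free up, so a submission that previously
 * failed with -ENOMEM can be retried.
 */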
static void
nvmf_ctrlr_process_io_cmd_resubmit(void *arg)
{
	struct spdk_nvmf_request *req = arg;
	int rc;

	rc = nvmf_ctrlr_process_io_cmd(req);
	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		spdk_nvmf_request_complete(req);
	}
}

static void
nvmf_ctrlr_process_admin_cmd_resubmit(void *arg)
{
	struct spdk_nvmf_request *req = arg;
	int rc;

	rc = nvmf_ctrlr_process_admin_cmd(req);
	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		spdk_nvmf_request_complete(req);
	}
}

static void
nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn, void *cb_arg)
{
	int rc;

	req->bdev_io_wait.bdev = bdev;
	req->bdev_io_wait.cb_fn = cb_fn;
	req->bdev_io_wait.cb_arg = cb_arg;

	rc = spdk_bdev_queue_io_wait(bdev, ch, &req->bdev_io_wait);
	if (rc != 0) {
		assert(false);
	}
	req->qpair->group->stat.pending_bdev_io++;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY);
}

int
nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_bdev_ext_io_opts opts = {
		.size = SPDK_SIZEOF(&opts, accel_sequence),
		.memory_domain = req->memory_domain,
		.memory_domain_ctx = req->memory_domain_ctx
	};
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	assert(!spdk_nvmf_request_using_zcopy(req));

	rc = spdk_bdev_readv_blocks_ext(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
					nvmf_bdev_ctrlr_complete_cmd, req, &opts);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			  struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_bdev_ext_io_opts opts = {
		.size = SPDK_SIZEOF(&opts, accel_sequence),
		.memory_domain = req->memory_domain,
		.memory_domain_ctx = req->memory_domain_ctx
	};
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	assert(!spdk_nvmf_request_using_zcopy(req));

	rc = spdk_bdev_writev_blocks_ext(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
					 nvmf_bdev_ctrlr_complete_cmd, req, &opts);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Compare NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_comparev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				       nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req,
				      struct spdk_nvmf_request *write_req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmp_cmd = &cmp_req->cmd->nvme_cmd;
	struct spdk_nvme_cmd *write_cmd = &write_req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &write_req->rsp->nvme_cpl;
	uint64_t write_start_lba, cmp_start_lba;
	uint64_t write_num_blocks, cmp_num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmp_cmd, &cmp_start_lba, &cmp_num_blocks);
	nvmf_bdev_ctrlr_get_rw_params(write_cmd, &write_start_lba, &write_num_blocks);

	if (spdk_unlikely(write_start_lba != cmp_start_lba || write_num_blocks != cmp_num_blocks)) {
		SPDK_ERRLOG("Fused command start lba / num blocks mismatch\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, write_start_lba,
			  write_num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(write_num_blocks * block_size > write_req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    write_num_blocks, block_size, write_req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_comparev_and_writev_blocks(desc, ch, cmp_req->iov, cmp_req->iovcnt, write_req->iov,
			write_req->iovcnt, write_start_lba, write_num_blocks, nvmf_bdev_ctrlr_complete_cmd, write_req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(cmp_req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, cmp_req);
			nvmf_bdev_ctrl_queue_io(write_req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, write_req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ch, start_lba, num_blocks,
					   nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			  struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int rc;

	/* The NVMe-oF controller always sets the volatile write cache bit to 1,
	 * so return success for block devices that don't support the FLUSH command.
	 */
	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_flush_blocks(desc, ch, 0, spdk_bdev_get_num_blocks(bdev),
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

struct nvmf_bdev_ctrlr_unmap {
	struct spdk_nvmf_request *req;
	uint32_t count;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev *bdev;
	struct spdk_io_channel *ch;
	uint32_t range_index;
};

static void
nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success,
			  void *cb_arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = cb_arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int sc, sct;
	uint32_t cdw0;

	unmap_ctx->count--;

	if (response->status.sct == SPDK_NVME_SCT_GENERIC &&
	    response->status.sc == SPDK_NVME_SC_SUCCESS) {
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
		response->cdw0 = cdw0;
		response->status.sc = sc;
		response->status.sct = sct;
	}

	if (unmap_ctx->count == 0) {
		spdk_nvmf_request_complete(req);
		free(unmap_ctx);
	}
	spdk_bdev_free_io(bdev_io);
}

static int nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
				 struct nvmf_bdev_ctrlr_unmap *unmap_ctx);

static void
nvmf_bdev_ctrlr_unmap_resubmit(void *arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_bdev_desc *desc = unmap_ctx->desc;
	struct spdk_bdev *bdev = unmap_ctx->bdev;
	struct spdk_io_channel *ch = unmap_ctx->ch;

	nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, unmap_ctx);
}

static int
nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx)
{
	uint16_t nr, i;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_iov_xfer ix;
	uint64_t lba;
	uint32_t lba_count;
	int rc;

	nr = cmd->cdw10_bits.dsm.nr + 1;
	if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) {
		SPDK_ERRLOG("Dataset Management number of ranges > SGL length\n");
		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (unmap_ctx == NULL) {
		unmap_ctx = calloc(1, sizeof(*unmap_ctx));
		if (!unmap_ctx) {
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		unmap_ctx->req = req;
		unmap_ctx->desc = desc;
		unmap_ctx->ch = ch;
		unmap_ctx->bdev = bdev;

		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
	} else {
		unmap_ctx->count--;	/* dequeued */
	}

	spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);

	for (i = unmap_ctx->range_index; i < nr; i++) {
		struct spdk_nvme_dsm_range dsm_range = { 0 };

		spdk_iov_xfer_to_buf(&ix, &dsm_range, sizeof(dsm_range));

		lba = dsm_range.starting_lba;
		lba_count = dsm_range.length;

		unmap_ctx->count++;

		rc = spdk_bdev_unmap_blocks(desc, ch, lba, lba_count,
					    nvmf_bdev_ctrlr_unmap_cpl, unmap_ctx);
		if (rc) {
			if (rc == -ENOMEM) {
				nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx);
				/* Unmap was not yet submitted to bdev */
				/* unmap_ctx->count will be decremented when the request is dequeued */
				return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
			}
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			unmap_ctx->count--;
			/* We can't return here - we may have to wait for any other
			 * unmaps already sent to complete */
			break;
		}
		unmap_ctx->range_index++;
	}

	if (unmap_ctx->count == 0) {
		free(unmap_ctx);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	if (cmd->cdw11_bits.dsm.ad) {
		return nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, NULL);
	}

	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

int
nvmf_bdev_ctrlr_copy_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	uint64_t sdlba = ((uint64_t)cmd->cdw11 << 32) + cmd->cdw10;
	struct spdk_nvme_scc_source_range range = { 0 };
	struct spdk_iov_xfer ix;
	int rc;

	SPDK_DEBUGLOG(nvmf, "Copy command: SDLBA %lu, NR %u, desc format %u, PRINFOR %u, "
		      "DTYPE %u, STCW %u, PRINFOW %u, FUA %u, LR %u\n",
		      sdlba,
		      cmd->cdw12_bits.copy.nr,
		      cmd->cdw12_bits.copy.df,
		      cmd->cdw12_bits.copy.prinfor,
		      cmd->cdw12_bits.copy.dtype,
		      cmd->cdw12_bits.copy.stcw,
		      cmd->cdw12_bits.copy.prinfow,
		      cmd->cdw12_bits.copy.fua,
		      cmd->cdw12_bits.copy.lr);

	if (spdk_unlikely(req->length != (cmd->cdw12_bits.copy.nr + 1) *
			  sizeof(struct spdk_nvme_scc_source_range))) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/*
	 * We support only one source range, and rely on this with the xfer
	 * below.
	 */
	if (cmd->cdw12_bits.copy.nr > 0) {
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVME_SC_CMD_SIZE_LIMIT_SIZE_EXCEEDED;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (cmd->cdw12_bits.copy.df != 0) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
	spdk_iov_xfer_to_buf(&ix, &range, sizeof(range));

	rc = spdk_bdev_copy_blocks(desc, ch, sdlba, range.slba, range.nlb + 1,
				   nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}

		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	int rc;

	rc = spdk_bdev_nvme_iov_passthru_md(desc, ch, &req->cmd->nvme_cmd, req->iov, req->iovcnt,
					    req->length, NULL, 0, nvmf_bdev_ctrlr_complete_cmd, req);

	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		req->rsp->nvme_cpl.status.dnr = 1;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
					 struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
					 spdk_nvmf_nvme_passthru_cmd_cb cb_fn)
{
	int rc;

	if (spdk_unlikely(req->iovcnt > 1)) {
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		req->rsp->nvme_cpl.status.dnr = 1;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	req->cmd_cb_fn = cb_fn;

	rc = spdk_bdev_nvme_admin_passthru(desc, ch, &req->cmd->nvme_cmd, req->iov[0].iov_base, req->length,
					   nvmf_bdev_ctrlr_complete_admin_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		if (rc == -ENOTSUP) {
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		} else {
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		}

		req->rsp->nvme_cpl.status.dnr = 1;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

static void
nvmf_bdev_ctrlr_complete_abort_cmd(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;

	if (success) {
		req->rsp->nvme_cpl.cdw0 &= ~1U;
	}

	spdk_nvmf_request_complete(req);
	spdk_bdev_free_io(bdev_io);
}

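/* CDW0 bit 0 of the Abort completion is pre-set to 1 (command not aborted) by
 * the caller; nvmf_bdev_ctrlr_complete_abort_cmd() clears it when the bdev
 * layer reports a successful abort.
 */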
int
spdk_nvmf_bdev_ctrlr_abort_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			       struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
			       struct spdk_nvmf_request *req_to_abort)
{
	int rc;

	assert((req->rsp->nvme_cpl.cdw0 & 1U) != 0);

	rc = spdk_bdev_abort(desc, ch, req_to_abort, nvmf_bdev_ctrlr_complete_abort_cmd, req);
	if (spdk_likely(rc == 0)) {
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	} else if (rc == -ENOMEM) {
		nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	} else {
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}

bool
nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
			    struct spdk_dif_ctx *dif_ctx)
{
	uint32_t init_ref_tag, dif_check_flags = 0;
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;

	if (spdk_bdev_get_md_size(bdev) == 0) {
		return false;
	}

	/* Initial Reference Tag is the lower 32 bits of the start LBA. */
	init_ref_tag = (uint32_t)from_le64(&cmd->cdw10);

	if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_REFTAG)) {
		dif_check_flags |= SPDK_DIF_FLAGS_REFTAG_CHECK;
	}

	if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_GUARD)) {
		dif_check_flags |= SPDK_DIF_FLAGS_GUARD_CHECK;
	}

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(dif_ctx,
			       spdk_bdev_get_block_size(bdev),
			       spdk_bdev_get_md_size(bdev),
			       spdk_bdev_is_md_interleaved(bdev),
			       spdk_bdev_is_dif_head_of_md(bdev),
			       spdk_bdev_get_dif_type(bdev),
			       dif_check_flags,
			       init_ref_tag, 0, 0, 0, 0, &dif_opts);

	return (rc == 0) ? true : false;
}

static void
nvmf_bdev_ctrlr_zcopy_start_complete(struct spdk_bdev_io *bdev_io, bool success,
				     void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;
	struct iovec *iov;
	int iovcnt = 0;

	if (spdk_unlikely(!success)) {
		int sc = 0, sct = 0;
		uint32_t cdw0 = 0;
		struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);

		response->cdw0 = cdw0;
		response->status.sc = sc;
		response->status.sct = sct;

		spdk_bdev_free_io(bdev_io);
		spdk_nvmf_request_complete(req);
		return;
	}

	spdk_bdev_io_get_iovec(bdev_io, &iov, &iovcnt);

	assert(iovcnt <= NVMF_REQ_MAX_BUFFERS);
	assert(iovcnt > 0);

	req->iovcnt = iovcnt;

	assert(req->iov == iov);

	req->zcopy_bdev_io = bdev_io;	/* Preserve the bdev_io for the end zcopy */

	spdk_nvmf_request_complete(req);
	/* Don't free the bdev_io here as it is needed for the END ZCOPY */
}

int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(&req->cmd->nvme_cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	bool populate = (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) ? true : false;
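	/* For reads the bdev populates the buffers with the namespace data during
	 * start; for writes the buffers are only mapped here and the data is
	 * committed when the zcopy is ended.
	 */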

	rc = spdk_bdev_zcopy_start(desc, ch, req->iov, req->iovcnt, start_lba,
				   num_blocks, populate, nvmf_bdev_ctrlr_zcopy_start_complete, req);
	if (spdk_unlikely(rc != 0)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

static void
nvmf_bdev_ctrlr_zcopy_end_complete(struct spdk_bdev_io *bdev_io, bool success,
				   void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;

	if (spdk_unlikely(!success)) {
		int sc = 0, sct = 0;
		uint32_t cdw0 = 0;
		struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);

		response->cdw0 = cdw0;
		response->status.sc = sc;
		response->status.sct = sct;
	}

	spdk_bdev_free_io(bdev_io);
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
	int rc __attribute__((unused));

	rc = spdk_bdev_zcopy_end(req->zcopy_bdev_io, commit, nvmf_bdev_ctrlr_zcopy_end_complete, req);

	/* The only way spdk_bdev_zcopy_end() can fail is if we pass a bdev_io type that isn't ZCOPY */
	assert(rc == 0);
}