/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/likely.h"
#include "spdk/nvme.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/nvmf_spec.h"
#include "spdk/trace.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"

#include "spdk/log.h"

static bool
nvmf_subsystem_bdev_io_type_supported(struct spdk_nvmf_subsystem *subsystem,
				      enum spdk_bdev_io_type io_type)
{
	struct spdk_nvmf_ns *ns;

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		if (ns->bdev == NULL) {
			continue;
		}

		if (!spdk_bdev_io_type_supported(ns->bdev, io_type)) {
			SPDK_DEBUGLOG(nvmf,
				      "Subsystem %s namespace %u (%s) does not support io_type %d\n",
				      spdk_nvmf_subsystem_get_nqn(subsystem),
				      ns->opts.nsid, spdk_bdev_get_name(ns->bdev), (int)io_type);
			return false;
		}
	}

	SPDK_DEBUGLOG(nvmf, "All devices in Subsystem %s support io_type %d\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem), (int)io_type);
	return true;
}

bool
nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_UNMAP);
}

bool
nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
}

bool
nvmf_ctrlr_copy_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_COPY);
}

static void
nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
			     void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int first_sc = 0, first_sct = 0, sc = 0, sct = 0;
	uint32_t cdw0 = 0;
	struct spdk_nvmf_request *first_req = req->first_fused_req;

	if (spdk_unlikely(first_req != NULL)) {
		/* fused commands - get status for both operations */
		struct spdk_nvme_cpl *first_response = &first_req->rsp->nvme_cpl;

		spdk_bdev_io_get_nvme_fused_status(bdev_io, &cdw0, &first_sct, &first_sc, &sct, &sc);
		first_response->cdw0 = cdw0;
		first_response->status.sc = first_sc;
		first_response->status.sct = first_sct;

		/* first request should be completed */
		spdk_nvmf_request_complete(first_req);
		req->first_fused_req = NULL;
	} else {
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
	}

	response->cdw0 = cdw0;
	response->status.sc = sc;
	response->status.sct = sct;

	spdk_nvmf_request_complete(req);
	spdk_bdev_free_io(bdev_io);
}

static void
nvmf_bdev_ctrlr_complete_admin_cmd(struct spdk_bdev_io *bdev_io, bool success,
				   void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;

	if (req->cmd_cb_fn) {
		req->cmd_cb_fn(req);
	}

	nvmf_bdev_ctrlr_complete_cmd(bdev_io, success, req);
}
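
/*
 * nvmf_bdev_ctrlr_identify_ns() below translates bdev properties into
 * Identify Namespace fields. Illustrative example of the preferred write
 * granularity math (values chosen only for illustration): a bdev with a
 * 4096-byte physical block size and 512-byte logical blocks (lbads = 9)
 * reports npwg = (4096 >> 9) - 1 = 7, i.e. a granularity of 8 logical
 * blocks, 0's based; nawupf, npwa, npdg and npda are reported with the
 * same value.
 */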
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	struct spdk_bdev *bdev = ns->bdev;
	uint64_t num_blocks;
	uint32_t phys_blocklen;
	uint32_t max_copy;

	num_blocks = spdk_bdev_get_num_blocks(bdev);

	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->nacwu = spdk_bdev_get_acwu(bdev) - 1; /* nacwu is 0-based */
	if (!dif_insert_or_strip) {
		nsdata->lbaf[0].ms = spdk_bdev_get_md_size(bdev);
		nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_block_size(bdev));
		if (nsdata->lbaf[0].ms != 0) {
			nsdata->flbas.extended = 1;
			nsdata->mc.extended = 1;
			nsdata->mc.pointer = 0;
			nsdata->dps.md_start = spdk_bdev_is_dif_head_of_md(bdev);
			/* The NVMf library doesn't process the PRACT and PRCHK flags, so
			 * use of the extended LBA buffer is left to users.
			 */
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_DISABLE;
		}
	} else {
		nsdata->lbaf[0].ms = 0;
		nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_data_block_size(bdev));
	}

	phys_blocklen = spdk_bdev_get_physical_block_size(bdev);
	assert(phys_blocklen > 0);
	/* Linux driver uses min(nawupf, npwg) to set physical_block_size */
	nsdata->nsfeat.optperf = 1;
	nsdata->nsfeat.ns_atomic_write_unit = 1;
	nsdata->npwg = (phys_blocklen >> nsdata->lbaf[0].lbads) - 1;
	nsdata->nawupf = nsdata->npwg;
	nsdata->npwa = nsdata->npwg;
	nsdata->npdg = nsdata->npwg;
	nsdata->npda = nsdata->npwg;

	if (spdk_bdev_get_write_unit_size(bdev) == 1) {
		nsdata->noiob = spdk_bdev_get_optimal_io_boundary(bdev);
	}
	nsdata->nmic.can_share = 1;
	if (ns->ptpl_file != NULL) {
		nsdata->nsrescap.rescap.persist = 1;
	}
	nsdata->nsrescap.rescap.write_exclusive = 1;
	nsdata->nsrescap.rescap.exclusive_access = 1;
	nsdata->nsrescap.rescap.write_exclusive_reg_only = 1;
	nsdata->nsrescap.rescap.exclusive_access_reg_only = 1;
	nsdata->nsrescap.rescap.write_exclusive_all_reg = 1;
	nsdata->nsrescap.rescap.exclusive_access_all_reg = 1;
	nsdata->nsrescap.rescap.ignore_existing_key = 1;

	SPDK_STATIC_ASSERT(sizeof(nsdata->nguid) == sizeof(ns->opts.nguid), "size mismatch");
	memcpy(nsdata->nguid, ns->opts.nguid, sizeof(nsdata->nguid));

	SPDK_STATIC_ASSERT(sizeof(nsdata->eui64) == sizeof(ns->opts.eui64), "size mismatch");
	memcpy(&nsdata->eui64, ns->opts.eui64, sizeof(nsdata->eui64));

	/* For now we support just one source range for the copy command */
	nsdata->msrc = 0;

	max_copy = spdk_bdev_get_max_copy(bdev);
	if (max_copy == 0 || max_copy > UINT16_MAX) {
		/* Zero means copy size is unlimited */
		nsdata->mcl = UINT16_MAX;
		nsdata->mssrl = UINT16_MAX;
	} else {
		nsdata->mcl = max_copy;
		nsdata->mssrl = max_copy;
	}
}

static void
nvmf_bdev_ctrlr_get_rw_params(const struct spdk_nvme_cmd *cmd, uint64_t *start_lba,
			      uint64_t *num_blocks)
{
	/* SLBA: CDW10 and CDW11 */
	*start_lba = from_le64(&cmd->cdw10);

	/* NLB: CDW12 bits 15:00, 0's based */
	*num_blocks = (from_le32(&cmd->cdw12) & 0xFFFFu) + 1;
}

static bool
nvmf_bdev_ctrlr_lba_in_range(uint64_t bdev_num_blocks, uint64_t io_start_lba,
			     uint64_t io_num_blocks)
{
	if (io_start_lba + io_num_blocks > bdev_num_blocks ||
	    io_start_lba + io_num_blocks < io_start_lba) {
		return false;
	}

	return true;
}
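
/*
 * Resubmit helpers: when a bdev submission below fails with -ENOMEM, the
 * request is parked on the bdev's io_wait queue via nvmf_bdev_ctrl_queue_io()
 * and one of these callbacks re-drives it through nvmf_ctrlr_process_io_cmd()
 * or nvmf_ctrlr_process_admin_cmd() once a bdev_io becomes available.
 */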
static void
nvmf_ctrlr_process_io_cmd_resubmit(void *arg)
{
	struct spdk_nvmf_request *req = arg;
	int rc;

	rc = nvmf_ctrlr_process_io_cmd(req);
	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		spdk_nvmf_request_complete(req);
	}
}

static void
nvmf_ctrlr_process_admin_cmd_resubmit(void *arg)
{
	struct spdk_nvmf_request *req = arg;
	int rc;

	rc = nvmf_ctrlr_process_admin_cmd(req);
	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		spdk_nvmf_request_complete(req);
	}
}

static void
nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn, void *cb_arg)
{
	int rc;

	req->bdev_io_wait.bdev = bdev;
	req->bdev_io_wait.cb_fn = cb_fn;
	req->bdev_io_wait.cb_arg = cb_arg;

	rc = spdk_bdev_queue_io_wait(bdev, ch, &req->bdev_io_wait);
	if (rc != 0) {
		assert(false);
	}
	req->qpair->group->stat.pending_bdev_io++;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY);
}
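
/*
 * The read, write and compare handlers below share the same validation:
 * the LBA range must lie within the bdev, and NLB * block_size must not
 * exceed the transport-provided SGL length (req->length). Illustrative
 * example: an NLB field of 7 means 8 blocks (0's based), so with 512-byte
 * blocks the request must carry at least 4096 bytes of buffer.
 */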
int
nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_bdev_ext_io_opts opts = {
		.size = SPDK_SIZEOF(&opts, accel_sequence),
		.memory_domain = req->memory_domain,
		.memory_domain_ctx = req->memory_domain_ctx
	};
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	assert(!spdk_nvmf_request_using_zcopy(req));

	rc = spdk_bdev_readv_blocks_ext(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
					nvmf_bdev_ctrlr_complete_cmd, req, &opts);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			  struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_bdev_ext_io_opts opts = {
		.size = SPDK_SIZEOF(&opts, accel_sequence),
		.memory_domain = req->memory_domain,
		.memory_domain_ctx = req->memory_domain_ctx
	};
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	assert(!spdk_nvmf_request_using_zcopy(req));

	rc = spdk_bdev_writev_blocks_ext(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
					 nvmf_bdev_ctrlr_complete_cmd, req, &opts);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Compare NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_comparev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				       nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
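
/*
 * Fused COMPARE and WRITE: both halves must address the same LBA range.
 * The compare request is expected to be linked to the write request via
 * first_fused_req, so that nvmf_bdev_ctrlr_complete_cmd() can fill both
 * completions from spdk_bdev_io_get_nvme_fused_status() when the single
 * bdev I/O finishes.
 */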
int
nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmp_cmd = &cmp_req->cmd->nvme_cmd;
	struct spdk_nvme_cmd *write_cmd = &write_req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &write_req->rsp->nvme_cpl;
	uint64_t write_start_lba, cmp_start_lba;
	uint64_t write_num_blocks, cmp_num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmp_cmd, &cmp_start_lba, &cmp_num_blocks);
	nvmf_bdev_ctrlr_get_rw_params(write_cmd, &write_start_lba, &write_num_blocks);

	if (spdk_unlikely(write_start_lba != cmp_start_lba || write_num_blocks != cmp_num_blocks)) {
		SPDK_ERRLOG("Fused command start lba / num blocks mismatch\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, write_start_lba,
			  write_num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(write_num_blocks * block_size > write_req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    write_num_blocks, block_size, write_req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_comparev_and_writev_blocks(desc, ch, cmp_req->iov, cmp_req->iovcnt, write_req->iov,
			write_req->iovcnt, write_start_lba, write_num_blocks, nvmf_bdev_ctrlr_complete_cmd, write_req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(cmp_req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, cmp_req);
			nvmf_bdev_ctrl_queue_io(write_req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, write_req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
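
/*
 * Write Zeroes is bounded by the subsystem's max_write_zeroes_size_kib.
 * Illustrative example (values not from the source): with a 1024 KiB limit
 * and a 512-byte block size, any request longer than
 * (1024 << 10) / 512 = 2048 blocks is rejected with Invalid Field.
 */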
int
nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t max_write_zeroes_size = req->qpair->ctrlr->subsys->max_write_zeroes_size_kib;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);
	if (spdk_unlikely(max_write_zeroes_size > 0 &&
			  num_blocks > (max_write_zeroes_size << 10) / spdk_bdev_get_block_size(bdev))) {
		SPDK_ERRLOG("invalid write zeroes size, should not exceed %" PRIu64 "Kib\n", max_write_zeroes_size);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ch, start_lba, num_blocks,
					   nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			  struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int rc;

	/* As an NVMe-oF controller, SPDK always sets the volatile write cache
	 * bit to 1, so return success for block devices that don't support the
	 * FLUSH command.
	 */
	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_flush_blocks(desc, ch, 0, spdk_bdev_get_num_blocks(bdev),
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

struct nvmf_bdev_ctrlr_unmap {
	struct spdk_nvmf_request *req;
	uint32_t count;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev *bdev;
	struct spdk_io_channel *ch;
	uint32_t range_index;
};

static void
nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success,
			  void *cb_arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = cb_arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int sc, sct;
	uint32_t cdw0;

	unmap_ctx->count--;

	if (response->status.sct == SPDK_NVME_SCT_GENERIC &&
	    response->status.sc == SPDK_NVME_SC_SUCCESS) {
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
		response->cdw0 = cdw0;
		response->status.sc = sc;
		response->status.sct = sct;
	}

	if (unmap_ctx->count == 0) {
		spdk_nvmf_request_complete(req);
		free(unmap_ctx);
	}
	spdk_bdev_free_io(bdev_io);
}

static int nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
				 struct nvmf_bdev_ctrlr_unmap *unmap_ctx);

static void
nvmf_bdev_ctrlr_unmap_resubmit(void *arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_bdev_desc *desc = unmap_ctx->desc;
	struct spdk_bdev *bdev = unmap_ctx->bdev;
	struct spdk_io_channel *ch = unmap_ctx->ch;

	nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, unmap_ctx);
}
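
/*
 * Dataset Management / deallocate: each DSM range is translated into one
 * spdk_bdev_unmap_blocks() call. unmap_ctx->count tracks outstanding unmaps
 * and the request completes only when it drops to zero; on -ENOMEM the
 * remaining ranges are resumed from unmap_ctx->range_index via
 * nvmf_bdev_ctrlr_unmap_resubmit().
 */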
static int
nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx)
{
	uint16_t nr, i;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	uint64_t max_discard_size = req->qpair->ctrlr->subsys->max_discard_size_kib;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_iov_xfer ix;
	uint64_t lba;
	uint32_t lba_count;
	int rc;

	nr = cmd->cdw10_bits.dsm.nr + 1;
	if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) {
		SPDK_ERRLOG("Dataset Management number of ranges > SGL length\n");
		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (unmap_ctx == NULL) {
		unmap_ctx = calloc(1, sizeof(*unmap_ctx));
		if (!unmap_ctx) {
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		unmap_ctx->req = req;
		unmap_ctx->desc = desc;
		unmap_ctx->ch = ch;
		unmap_ctx->bdev = bdev;

		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
	} else {
		unmap_ctx->count--;	/* dequeued */
	}

	spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);

	for (i = unmap_ctx->range_index; i < nr; i++) {
		struct spdk_nvme_dsm_range dsm_range = { 0 };

		spdk_iov_xfer_to_buf(&ix, &dsm_range, sizeof(dsm_range));

		lba = dsm_range.starting_lba;
		lba_count = dsm_range.length;
		if (max_discard_size > 0 && lba_count > (max_discard_size << 10) / block_size) {
			SPDK_ERRLOG("invalid unmap size, should not exceed %" PRIu64 "Kib\n", max_discard_size);
			response->status.sct = SPDK_NVME_SCT_GENERIC;
			response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		unmap_ctx->count++;

		rc = spdk_bdev_unmap_blocks(desc, ch, lba, lba_count,
					    nvmf_bdev_ctrlr_unmap_cpl, unmap_ctx);
		if (rc) {
			if (rc == -ENOMEM) {
				nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx);
				/* Unmap was not yet submitted to bdev */
				/* unmap_ctx->count will be decremented when the request is dequeued */
				return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
			}
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			unmap_ctx->count--;
			/* We can't return here - we may have to wait for any other
			 * unmaps already sent to complete */
			break;
		}
		unmap_ctx->range_index++;
	}

	if (unmap_ctx->count == 0) {
		free(unmap_ctx);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	if (cmd->cdw11_bits.dsm.ad) {
		return nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, NULL);
	}

	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
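
/*
 * Simple Copy: only descriptor format 0 and a single source range are
 * supported (Identify Namespace reports msrc = 0). A Number of Ranges value
 * other than 0 is rejected with SPDK_NVME_SC_CMD_SIZE_LIMIT_SIZE_EXCEEDED,
 * and the one source range is copied to the SDLBA taken from CDW10/CDW11.
 */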
int
nvmf_bdev_ctrlr_copy_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	uint64_t sdlba = ((uint64_t)cmd->cdw11 << 32) + cmd->cdw10;
	struct spdk_nvme_scc_source_range range = { 0 };
	struct spdk_iov_xfer ix;
	int rc;

	SPDK_DEBUGLOG(nvmf, "Copy command: SDLBA %lu, NR %u, desc format %u, PRINFOR %u, "
		      "DTYPE %u, STCW %u, PRINFOW %u, FUA %u, LR %u\n",
		      sdlba,
		      cmd->cdw12_bits.copy.nr,
		      cmd->cdw12_bits.copy.df,
		      cmd->cdw12_bits.copy.prinfor,
		      cmd->cdw12_bits.copy.dtype,
		      cmd->cdw12_bits.copy.stcw,
		      cmd->cdw12_bits.copy.prinfow,
		      cmd->cdw12_bits.copy.fua,
		      cmd->cdw12_bits.copy.lr);

	if (spdk_unlikely(req->length != (cmd->cdw12_bits.copy.nr + 1) *
			  sizeof(struct spdk_nvme_scc_source_range))) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/*
	 * We support only one source range, and rely on this for the xfer
	 * below.
	 */
	if (cmd->cdw12_bits.copy.nr > 0) {
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVME_SC_CMD_SIZE_LIMIT_SIZE_EXCEEDED;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (cmd->cdw12_bits.copy.df != 0) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
	spdk_iov_xfer_to_buf(&ix, &range, sizeof(range));

	rc = spdk_bdev_copy_blocks(desc, ch, sdlba, range.slba, range.nlb + 1,
				   nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}

		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	int rc;

	rc = spdk_bdev_nvme_iov_passthru_md(desc, ch, &req->cmd->nvme_cmd, req->iov, req->iovcnt,
					    req->length, NULL, 0, nvmf_bdev_ctrlr_complete_cmd, req);

	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		req->rsp->nvme_cpl.status.dnr = 1;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
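
/*
 * Admin passthru requires a single contiguous buffer: requests carrying more
 * than one iovec are failed up front, and an -ENOTSUP return from the bdev
 * layer is mapped to Invalid Opcode with DNR set.
 */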
int
spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		spdk_nvmf_nvme_passthru_cmd_cb cb_fn)
{
	int rc;

	if (spdk_unlikely(req->iovcnt > 1)) {
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		req->rsp->nvme_cpl.status.dnr = 1;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	req->cmd_cb_fn = cb_fn;

	rc = spdk_bdev_nvme_admin_passthru(desc, ch, &req->cmd->nvme_cmd, req->iov[0].iov_base, req->length,
					   nvmf_bdev_ctrlr_complete_admin_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		if (rc == -ENOTSUP) {
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		} else {
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		}

		req->rsp->nvme_cpl.status.dnr = 1;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

static void
nvmf_bdev_ctrlr_complete_abort_cmd(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;

	if (success) {
		req->rsp->nvme_cpl.cdw0 &= ~1U;
	}

	spdk_nvmf_request_complete(req);
	spdk_bdev_free_io(bdev_io);
}

int
spdk_nvmf_bdev_ctrlr_abort_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			       struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
			       struct spdk_nvmf_request *req_to_abort)
{
	int rc;

	assert((req->rsp->nvme_cpl.cdw0 & 1U) != 0);

	rc = spdk_bdev_abort(desc, ch, req_to_abort, nvmf_bdev_ctrlr_complete_abort_cmd, req);
	if (spdk_likely(rc == 0)) {
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	} else if (rc == -ENOMEM) {
		nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	} else {
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}
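
/*
 * DIF context setup: the context is built from the bdev's metadata layout
 * and enabled DIF checks, with the initial reference tag taken from the
 * lower 32 bits of the SLBA; bdevs without metadata return false.
 */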
bool
nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
			    struct spdk_dif_ctx *dif_ctx)
{
	uint32_t init_ref_tag, dif_check_flags = 0;
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;

	if (spdk_bdev_get_md_size(bdev) == 0) {
		return false;
	}

	/* Initial Reference Tag is the lower 32 bits of the start LBA. */
	init_ref_tag = (uint32_t)from_le64(&cmd->cdw10);

	if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_REFTAG)) {
		dif_check_flags |= SPDK_DIF_FLAGS_REFTAG_CHECK;
	}

	if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_GUARD)) {
		dif_check_flags |= SPDK_DIF_FLAGS_GUARD_CHECK;
	}

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(dif_ctx,
			       spdk_bdev_get_block_size(bdev),
			       spdk_bdev_get_md_size(bdev),
			       spdk_bdev_is_md_interleaved(bdev),
			       spdk_bdev_is_dif_head_of_md(bdev),
			       spdk_bdev_get_dif_type(bdev),
			       dif_check_flags,
			       init_ref_tag, 0, 0, 0, 0, &dif_opts);

	return (rc == 0) ? true : false;
}

static void
nvmf_bdev_ctrlr_zcopy_start_complete(struct spdk_bdev_io *bdev_io, bool success,
				     void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;
	struct iovec *iov;
	int iovcnt = 0;

	if (spdk_unlikely(!success)) {
		int sc = 0, sct = 0;
		uint32_t cdw0 = 0;
		struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);

		response->cdw0 = cdw0;
		response->status.sc = sc;
		response->status.sct = sct;

		spdk_bdev_free_io(bdev_io);
		spdk_nvmf_request_complete(req);
		return;
	}

	spdk_bdev_io_get_iovec(bdev_io, &iov, &iovcnt);

	assert(iovcnt <= NVMF_REQ_MAX_BUFFERS);
	assert(iovcnt > 0);

	req->iovcnt = iovcnt;

	assert(req->iov == iov);

	req->zcopy_bdev_io = bdev_io;	/* Preserve the bdev_io for the end zcopy */

	spdk_nvmf_request_complete(req);
	/* Don't free the bdev_io here as it is needed for the END ZCOPY */
}
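
/*
 * Zero-copy lifecycle: zcopy start asks the bdev to provide the data buffers,
 * which are handed back to the transport through req->iov, and the bdev_io is
 * kept in req->zcopy_bdev_io until nvmf_bdev_ctrlr_zcopy_end() commits (for
 * writes) or releases them.
 */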
int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(&req->cmd->nvme_cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	bool populate = (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) ? true : false;

	rc = spdk_bdev_zcopy_start(desc, ch, req->iov, req->iovcnt, start_lba,
				   num_blocks, populate, nvmf_bdev_ctrlr_zcopy_start_complete, req);
	if (spdk_unlikely(rc != 0)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

static void
nvmf_bdev_ctrlr_zcopy_end_complete(struct spdk_bdev_io *bdev_io, bool success,
				   void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;

	if (spdk_unlikely(!success)) {
		int sc = 0, sct = 0;
		uint32_t cdw0 = 0;
		struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);

		response->cdw0 = cdw0;
		response->status.sc = sc;
		response->status.sct = sct;
	}

	spdk_bdev_free_io(bdev_io);
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
	int rc __attribute__((unused));

	rc = spdk_bdev_zcopy_end(req->zcopy_bdev_io, commit, nvmf_bdev_ctrlr_zcopy_end_complete, req);

	/* The only way spdk_bdev_zcopy_end() can fail is if we pass a bdev_io type that isn't ZCOPY */
	assert(rc == 0);
}