1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "nvmf_internal.h" 37 38 #include "spdk/bdev.h" 39 #include "spdk/endian.h" 40 #include "spdk/thread.h" 41 #include "spdk/likely.h" 42 #include "spdk/nvme.h" 43 #include "spdk/nvmf_spec.h" 44 #include "spdk/trace.h" 45 #include "spdk/scsi_spec.h" 46 #include "spdk/string.h" 47 #include "spdk/util.h" 48 49 #include "spdk_internal/nvmf.h" 50 #include "spdk_internal/log.h" 51 52 static bool 53 spdk_nvmf_subsystem_bdev_io_type_supported(struct spdk_nvmf_subsystem *subsystem, 54 enum spdk_bdev_io_type io_type) 55 { 56 struct spdk_nvmf_ns *ns; 57 58 for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL; 59 ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) { 60 if (ns->bdev == NULL) { 61 continue; 62 } 63 64 if (!spdk_bdev_io_type_supported(ns->bdev, io_type)) { 65 SPDK_DEBUGLOG(SPDK_LOG_NVMF, 66 "Subsystem %s namespace %u (%s) does not support io_type %d\n", 67 spdk_nvmf_subsystem_get_nqn(subsystem), 68 ns->opts.nsid, spdk_bdev_get_name(ns->bdev), (int)io_type); 69 return false; 70 } 71 } 72 73 SPDK_DEBUGLOG(SPDK_LOG_NVMF, "All devices in Subsystem %s support io_type %d\n", 74 spdk_nvmf_subsystem_get_nqn(subsystem), (int)io_type); 75 return true; 76 } 77 78 bool 79 spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr) 80 { 81 return spdk_nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_UNMAP); 82 } 83 84 bool 85 spdk_nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr) 86 { 87 return spdk_nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_WRITE_ZEROES); 88 } 89 90 static void 91 nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success, 92 void *cb_arg) 93 { 94 struct spdk_nvmf_request *req = cb_arg; 95 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 96 int sc, sct; 97 uint32_t cdw0; 98 99 spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc); 100 response->cdw0 = cdw0; 101 response->status.sc = sc; 102 response->status.sct = sct; 103 104 spdk_nvmf_request_complete(req); 105 spdk_bdev_free_io(bdev_io); 106 } 107 108 void 109 spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata, 110 bool dif_insert_or_strip) 111 { 112 struct spdk_bdev *bdev = ns->bdev; 113 uint64_t num_blocks; 114 115 num_blocks = spdk_bdev_get_num_blocks(bdev); 116 117 nsdata->nsze = num_blocks; 118 nsdata->ncap = num_blocks; 119 nsdata->nuse = num_blocks; 120 nsdata->nlbaf = 0; 121 nsdata->flbas.format = 0; 122 if (!dif_insert_or_strip) { 123 nsdata->lbaf[0].ms = spdk_bdev_get_md_size(bdev); 124 nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_block_size(bdev)); 125 if (nsdata->lbaf[0].ms != 0) { 126 nsdata->flbas.extended = 1; 127 nsdata->mc.extended = 1; 128 nsdata->mc.pointer = 0; 129 nsdata->dps.md_start = spdk_bdev_is_dif_head_of_md(bdev); 130 131 switch (spdk_bdev_get_dif_type(bdev)) { 132 case SPDK_DIF_TYPE1: 133 nsdata->dpc.pit1 = 1; 134 nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE1; 135 break; 136 case SPDK_DIF_TYPE2: 137 nsdata->dpc.pit2 = 1; 138 nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE2; 139 break; 140 case SPDK_DIF_TYPE3: 141 nsdata->dpc.pit3 = 1; 142 nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE3; 143 break; 144 default: 145 SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Protection Disabled\n"); 146 nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_DISABLE; 147 break; 148 } 149 } 150 } else { 151 nsdata->lbaf[0].ms = 0; 152 nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_data_block_size(bdev)); 153 } 154 nsdata->noiob = spdk_bdev_get_optimal_io_boundary(bdev); 155 nsdata->nmic.can_share = 1; 156 if (ns->ptpl_file != NULL) { 157 nsdata->nsrescap.rescap.persist = 1; 158 } 159 nsdata->nsrescap.rescap.write_exclusive = 1; 160 nsdata->nsrescap.rescap.exclusive_access = 1; 161 nsdata->nsrescap.rescap.write_exclusive_reg_only = 1; 162 nsdata->nsrescap.rescap.exclusive_access_reg_only = 1; 163 nsdata->nsrescap.rescap.write_exclusive_all_reg = 1; 164 nsdata->nsrescap.rescap.exclusive_access_all_reg = 1; 165 nsdata->nsrescap.rescap.ignore_existing_key = 1; 166 167 SPDK_STATIC_ASSERT(sizeof(nsdata->nguid) == sizeof(ns->opts.nguid), "size mismatch"); 168 memcpy(nsdata->nguid, ns->opts.nguid, sizeof(nsdata->nguid)); 169 170 SPDK_STATIC_ASSERT(sizeof(nsdata->eui64) == sizeof(ns->opts.eui64), "size mismatch"); 171 memcpy(&nsdata->eui64, ns->opts.eui64, sizeof(nsdata->eui64)); 172 } 173 174 static void 175 nvmf_bdev_ctrlr_get_rw_params(const struct spdk_nvme_cmd *cmd, uint64_t *start_lba, 176 uint64_t *num_blocks) 177 { 178 /* SLBA: CDW10 and CDW11 */ 179 *start_lba = from_le64(&cmd->cdw10); 180 181 /* NLB: CDW12 bits 15:00, 0's based */ 182 *num_blocks = (from_le32(&cmd->cdw12) & 0xFFFFu) + 1; 183 } 184 185 static bool 186 nvmf_bdev_ctrlr_lba_in_range(uint64_t bdev_num_blocks, uint64_t io_start_lba, 187 uint64_t io_num_blocks) 188 { 189 if (io_start_lba + io_num_blocks > bdev_num_blocks || 190 io_start_lba + io_num_blocks < io_start_lba) { 191 return false; 192 } 193 194 return true; 195 } 196 197 static void 198 spdk_nvmf_ctrlr_process_io_cmd_resubmit(void *arg) 199 { 200 struct spdk_nvmf_request *req = arg; 201 202 spdk_nvmf_ctrlr_process_io_cmd(req); 203 } 204 205 static void 206 nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev, 207 struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn, void *cb_arg) 208 { 209 int rc; 210 211 req->bdev_io_wait.bdev = bdev; 212 req->bdev_io_wait.cb_fn = cb_fn; 213 req->bdev_io_wait.cb_arg = cb_arg; 214 215 rc = spdk_bdev_queue_io_wait(bdev, ch, &req->bdev_io_wait); 216 if (rc != 0) { 217 assert(false); 218 } 219 req->qpair->group->stat.pending_bdev_io++; 220 } 221 222 int 223 spdk_nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, 224 struct spdk_io_channel *ch, struct spdk_nvmf_request *req) 225 { 226 uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev); 227 uint32_t block_size = spdk_bdev_get_block_size(bdev); 228 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 229 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 230 uint64_t start_lba; 231 uint64_t num_blocks; 232 int rc; 233 234 nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks); 235 236 if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) { 237 SPDK_ERRLOG("end of media\n"); 238 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 239 rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE; 240 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 241 } 242 243 if (spdk_unlikely(num_blocks * block_size > req->length)) { 244 SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n", 245 num_blocks, block_size, req->length); 246 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 247 rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID; 248 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 249 } 250 251 rc = spdk_bdev_readv_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks, 252 nvmf_bdev_ctrlr_complete_cmd, req); 253 if (spdk_unlikely(rc)) { 254 if (rc == -ENOMEM) { 255 nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req); 256 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 257 } 258 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 259 rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 260 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 261 } 262 263 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 264 } 265 266 int 267 spdk_nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, 268 struct spdk_io_channel *ch, struct spdk_nvmf_request *req) 269 { 270 uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev); 271 uint32_t block_size = spdk_bdev_get_block_size(bdev); 272 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 273 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 274 uint64_t start_lba; 275 uint64_t num_blocks; 276 int rc; 277 278 nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks); 279 280 if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) { 281 SPDK_ERRLOG("end of media\n"); 282 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 283 rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE; 284 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 285 } 286 287 if (spdk_unlikely(num_blocks * block_size > req->length)) { 288 SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n", 289 num_blocks, block_size, req->length); 290 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 291 rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID; 292 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 293 } 294 295 rc = spdk_bdev_writev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks, 296 nvmf_bdev_ctrlr_complete_cmd, req); 297 if (spdk_unlikely(rc)) { 298 if (rc == -ENOMEM) { 299 nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req); 300 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 301 } 302 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 303 rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 304 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 305 } 306 307 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 308 } 309 310 int 311 spdk_nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, 312 struct spdk_io_channel *ch, struct spdk_nvmf_request *req) 313 { 314 uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev); 315 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 316 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 317 uint64_t start_lba; 318 uint64_t num_blocks; 319 int rc; 320 321 nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks); 322 323 if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) { 324 SPDK_ERRLOG("end of media\n"); 325 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 326 rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE; 327 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 328 } 329 330 rc = spdk_bdev_write_zeroes_blocks(desc, ch, start_lba, num_blocks, 331 nvmf_bdev_ctrlr_complete_cmd, req); 332 if (spdk_unlikely(rc)) { 333 if (rc == -ENOMEM) { 334 nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req); 335 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 336 } 337 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 338 rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 339 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 340 } 341 342 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 343 } 344 345 int 346 spdk_nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, 347 struct spdk_io_channel *ch, struct spdk_nvmf_request *req) 348 { 349 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 350 int rc; 351 352 /* As for NVMeoF controller, SPDK always set volatile write 353 * cache bit to 1, return success for those block devices 354 * which can't support FLUSH command. 355 */ 356 if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) { 357 response->status.sct = SPDK_NVME_SCT_GENERIC; 358 response->status.sc = SPDK_NVME_SC_SUCCESS; 359 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 360 } 361 362 rc = spdk_bdev_flush_blocks(desc, ch, 0, spdk_bdev_get_num_blocks(bdev), 363 nvmf_bdev_ctrlr_complete_cmd, req); 364 if (spdk_unlikely(rc)) { 365 if (rc == -ENOMEM) { 366 nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req); 367 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 368 } 369 response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 370 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 371 } 372 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 373 } 374 375 struct nvmf_bdev_ctrlr_unmap { 376 struct spdk_nvmf_request *req; 377 uint32_t count; 378 struct spdk_bdev_desc *desc; 379 struct spdk_bdev *bdev; 380 struct spdk_io_channel *ch; 381 uint32_t range_index; 382 }; 383 384 static void 385 nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success, 386 void *cb_arg) 387 { 388 struct nvmf_bdev_ctrlr_unmap *unmap_ctx = cb_arg; 389 struct spdk_nvmf_request *req = unmap_ctx->req; 390 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 391 int sc, sct; 392 uint32_t cdw0; 393 394 unmap_ctx->count--; 395 396 if (response->status.sct == SPDK_NVME_SCT_GENERIC && 397 response->status.sc == SPDK_NVME_SC_SUCCESS) { 398 spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc); 399 response->cdw0 = cdw0; 400 response->status.sc = sc; 401 response->status.sct = sct; 402 } 403 404 if (unmap_ctx->count == 0) { 405 spdk_nvmf_request_complete(req); 406 free(unmap_ctx); 407 } 408 spdk_bdev_free_io(bdev_io); 409 } 410 411 static int 412 nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, 413 struct spdk_io_channel *ch, struct spdk_nvmf_request *req, 414 struct nvmf_bdev_ctrlr_unmap *unmap_ctx); 415 static void 416 nvmf_bdev_ctrlr_unmap_resubmit(void *arg) 417 { 418 struct nvmf_bdev_ctrlr_unmap *unmap_ctx = arg; 419 struct spdk_nvmf_request *req = unmap_ctx->req; 420 struct spdk_bdev_desc *desc = unmap_ctx->desc; 421 struct spdk_bdev *bdev = unmap_ctx->bdev; 422 struct spdk_io_channel *ch = unmap_ctx->ch; 423 424 nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, unmap_ctx); 425 } 426 427 static int 428 nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, 429 struct spdk_io_channel *ch, struct spdk_nvmf_request *req, 430 struct nvmf_bdev_ctrlr_unmap *unmap_ctx) 431 { 432 uint16_t nr, i; 433 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 434 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 435 struct spdk_nvme_dsm_range *dsm_range; 436 uint64_t lba; 437 uint32_t lba_count; 438 int rc; 439 440 nr = cmd->cdw10_bits.dsm.nr + 1; 441 if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) { 442 SPDK_ERRLOG("Dataset Management number of ranges > SGL length\n"); 443 response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID; 444 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 445 } 446 447 if (unmap_ctx == NULL) { 448 unmap_ctx = calloc(1, sizeof(*unmap_ctx)); 449 if (!unmap_ctx) { 450 response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 451 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 452 } 453 454 unmap_ctx->req = req; 455 unmap_ctx->desc = desc; 456 unmap_ctx->ch = ch; 457 unmap_ctx->bdev = bdev; 458 459 response->status.sct = SPDK_NVME_SCT_GENERIC; 460 response->status.sc = SPDK_NVME_SC_SUCCESS; 461 } else { 462 unmap_ctx->count--; /* dequeued */ 463 } 464 465 dsm_range = (struct spdk_nvme_dsm_range *)req->data; 466 for (i = unmap_ctx->range_index; i < nr; i++) { 467 lba = dsm_range[i].starting_lba; 468 lba_count = dsm_range[i].length; 469 470 unmap_ctx->count++; 471 472 rc = spdk_bdev_unmap_blocks(desc, ch, lba, lba_count, 473 nvmf_bdev_ctrlr_unmap_cpl, unmap_ctx); 474 if (rc) { 475 if (rc == -ENOMEM) { 476 nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx); 477 /* Unmap was not yet submitted to bdev */ 478 /* unmap_ctx->count will be decremented when the request is dequeued */ 479 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 480 } 481 response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 482 unmap_ctx->count--; 483 /* We can't return here - we may have to wait for any other 484 * unmaps already sent to complete */ 485 break; 486 } 487 unmap_ctx->range_index++; 488 } 489 490 if (unmap_ctx->count == 0) { 491 free(unmap_ctx); 492 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 493 } 494 495 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 496 } 497 498 int 499 spdk_nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, 500 struct spdk_io_channel *ch, struct spdk_nvmf_request *req) 501 { 502 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 503 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 504 505 if (cmd->cdw11_bits.dsm.ad) { 506 return nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, NULL); 507 } 508 509 response->status.sct = SPDK_NVME_SCT_GENERIC; 510 response->status.sc = SPDK_NVME_SC_SUCCESS; 511 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 512 } 513 514 int 515 spdk_nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, 516 struct spdk_io_channel *ch, struct spdk_nvmf_request *req) 517 { 518 int rc; 519 520 rc = spdk_bdev_nvme_io_passthru(desc, ch, &req->cmd->nvme_cmd, req->data, req->length, 521 nvmf_bdev_ctrlr_complete_cmd, req); 522 if (spdk_unlikely(rc)) { 523 if (rc == -ENOMEM) { 524 nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req); 525 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 526 } 527 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 528 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE; 529 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 530 } 531 532 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 533 } 534 535 bool 536 spdk_nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, 537 struct spdk_dif_ctx *dif_ctx) 538 { 539 uint32_t init_ref_tag, dif_check_flags = 0; 540 int rc; 541 542 if (spdk_bdev_get_md_size(bdev) == 0) { 543 return false; 544 } 545 546 /* Initial Reference Tag is the lower 32 bits of the start LBA. */ 547 init_ref_tag = (uint32_t)from_le64(&cmd->cdw10); 548 549 if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_REFTAG)) { 550 dif_check_flags |= SPDK_DIF_FLAGS_REFTAG_CHECK; 551 } 552 553 if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_GUARD)) { 554 dif_check_flags |= SPDK_DIF_FLAGS_GUARD_CHECK; 555 } 556 557 rc = spdk_dif_ctx_init(dif_ctx, 558 spdk_bdev_get_block_size(bdev), 559 spdk_bdev_get_md_size(bdev), 560 spdk_bdev_is_md_interleaved(bdev), 561 spdk_bdev_is_dif_head_of_md(bdev), 562 spdk_bdev_get_dif_type(bdev), 563 dif_check_flags, 564 init_ref_tag, 0, 0, 0, 0); 565 566 return (rc == 0) ? true : false; 567 } 568