/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/likely.h"
#include "spdk/nvme.h"
#include "spdk/nvmf_spec.h"
#include "spdk/trace.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"

#include "spdk_internal/log.h"

static bool
spdk_nvmf_subsystem_bdev_io_type_supported(struct spdk_nvmf_subsystem *subsystem,
		enum spdk_bdev_io_type io_type)
{
	struct spdk_nvmf_ns *ns;

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		if (ns->bdev == NULL) {
			continue;
		}

		if (!spdk_bdev_io_type_supported(ns->bdev, io_type)) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF,
				      "Subsystem %s namespace %u (%s) does not support io_type %d\n",
				      spdk_nvmf_subsystem_get_nqn(subsystem),
				      ns->opts.nsid, spdk_bdev_get_name(ns->bdev), (int)io_type);
			return false;
		}
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "All devices in Subsystem %s support io_type %d\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem), (int)io_type);
	return true;
}

bool
spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return spdk_nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_UNMAP);
}

bool
spdk_nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return spdk_nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
}

static void
nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
			     void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int sc, sct;
	uint32_t cdw0;

	spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
	response->cdw0 = cdw0;
	response->status.sc = sc;
	response->status.sct = sct;

	spdk_nvmf_request_complete(req);
	spdk_bdev_free_io(bdev_io);
}

void
spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
				 bool dif_insert_or_strip)
{
	struct spdk_bdev *bdev = ns->bdev;
	uint64_t num_blocks;

	num_blocks = spdk_bdev_get_num_blocks(bdev);

	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	if (!dif_insert_or_strip) {
		nsdata->lbaf[0].ms = spdk_bdev_get_md_size(bdev);
		nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_block_size(bdev));
		if (nsdata->lbaf[0].ms != 0) {
			nsdata->flbas.extended = 1;
			nsdata->mc.extended = 1;
			nsdata->mc.pointer = 0;
			nsdata->dps.md_start = spdk_bdev_is_dif_head_of_md(bdev);

			switch (spdk_bdev_get_dif_type(bdev)) {
			case SPDK_DIF_TYPE1:
				nsdata->dpc.pit1 = 1;
				nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE1;
				break;
			case SPDK_DIF_TYPE2:
				nsdata->dpc.pit2 = 1;
				nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE2;
				break;
			case SPDK_DIF_TYPE3:
				nsdata->dpc.pit3 = 1;
				nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE3;
				break;
			default:
				SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Protection Disabled\n");
				nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_DISABLE;
				break;
			}
		}
	} else {
		nsdata->lbaf[0].ms = 0;
		nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_data_block_size(bdev));
	}
	nsdata->noiob = spdk_bdev_get_optimal_io_boundary(bdev);
	nsdata->nmic.can_share = 1;
	if (ns->ptpl_file != NULL) {
		nsdata->nsrescap.rescap.persist = 1;
	}
	nsdata->nsrescap.rescap.write_exclusive = 1;
	nsdata->nsrescap.rescap.exclusive_access = 1;
	nsdata->nsrescap.rescap.write_exclusive_reg_only = 1;
	nsdata->nsrescap.rescap.exclusive_access_reg_only = 1;
	nsdata->nsrescap.rescap.write_exclusive_all_reg = 1;
	nsdata->nsrescap.rescap.exclusive_access_all_reg = 1;
	nsdata->nsrescap.rescap.ignore_existing_key = 1;

	SPDK_STATIC_ASSERT(sizeof(nsdata->nguid) == sizeof(ns->opts.nguid), "size mismatch");
	memcpy(nsdata->nguid, ns->opts.nguid, sizeof(nsdata->nguid));

	SPDK_STATIC_ASSERT(sizeof(nsdata->eui64) == sizeof(ns->opts.eui64), "size mismatch");
	memcpy(&nsdata->eui64, ns->opts.eui64, sizeof(nsdata->eui64));
}

static void
nvmf_bdev_ctrlr_get_rw_params(const struct spdk_nvme_cmd *cmd, uint64_t *start_lba,
			      uint64_t *num_blocks)
{
	/* SLBA: CDW10 and CDW11 */
	*start_lba = from_le64(&cmd->cdw10);

	/* NLB: CDW12 bits 15:00, 0's based */
	*num_blocks = (from_le32(&cmd->cdw12) & 0xFFFFu) + 1;
}

/* Return true only if the I/O range fits within the bdev and the
 * start LBA + block count does not wrap around.
 */
static bool
nvmf_bdev_ctrlr_lba_in_range(uint64_t bdev_num_blocks, uint64_t io_start_lba,
			     uint64_t io_num_blocks)
{
	if (io_start_lba + io_num_blocks > bdev_num_blocks ||
	    io_start_lba + io_num_blocks < io_start_lba) {
		return false;
	}

	return true;
}

static void
spdk_nvmf_ctrlr_process_io_cmd_resubmit(void *arg)
{
	struct spdk_nvmf_request *req = arg;

	spdk_nvmf_ctrlr_process_io_cmd(req);
}

/* Queue the request so that cb_fn resubmits it once the bdev has a free
 * spdk_bdev_io. Used when a bdev submission fails with -ENOMEM.
 */
static void
nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn, void *cb_arg)
{
	int rc;

	req->bdev_io_wait.bdev = bdev;
	req->bdev_io_wait.cb_fn = cb_fn;
	req->bdev_io_wait.cb_arg = cb_arg;

	rc = spdk_bdev_queue_io_wait(bdev, ch, &req->bdev_io_wait);
	if (rc != 0) {
		assert(false);
	}
	req->qpair->group->stat.pending_bdev_io++;
}

int
spdk_nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_readv_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
spdk_nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			       struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_writev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				     nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
spdk_nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ch, start_lba, num_blocks,
					   nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
spdk_nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			       struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int rc;

	/* For an NVMe-oF controller, SPDK always reports the volatile write
	 * cache as enabled, so return success for block devices that do not
	 * support the FLUSH I/O type.
	 */
	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_flush_blocks(desc, ch, 0, spdk_bdev_get_num_blocks(bdev),
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

struct nvmf_bdev_ctrlr_unmap {
	struct spdk_nvmf_request *req;
	uint32_t count;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev *bdev;
	struct spdk_io_channel *ch;
	uint32_t range_index;
};

static void
nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success,
			  void *cb_arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = cb_arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int sc, sct;
	uint32_t cdw0;

	unmap_ctx->count--;

	if (response->status.sct == SPDK_NVME_SCT_GENERIC &&
	    response->status.sc == SPDK_NVME_SC_SUCCESS) {
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
		response->cdw0 = cdw0;
		response->status.sc = sc;
		response->status.sct = sct;
	}

	if (unmap_ctx->count == 0) {
		spdk_nvmf_request_complete(req);
		free(unmap_ctx);
	}
	spdk_bdev_free_io(bdev_io);
}

static int
nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx);

static void
nvmf_bdev_ctrlr_unmap_resubmit(void *arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_bdev_desc *desc = unmap_ctx->desc;
	struct spdk_bdev *bdev = unmap_ctx->bdev;
	struct spdk_io_channel *ch = unmap_ctx->ch;

	nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, unmap_ctx);
}

static int
nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx)
{
	uint16_t nr, i;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvme_dsm_range *dsm_range;
	uint64_t lba;
	uint32_t lba_count;
	int rc;

	nr = ((cmd->cdw10 & 0x000000ff) + 1);
	if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) {
		SPDK_ERRLOG("Dataset Management number of ranges > SGL length\n");
		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (unmap_ctx == NULL) {
		unmap_ctx = calloc(1, sizeof(*unmap_ctx));
		if (!unmap_ctx) {
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		unmap_ctx->req = req;
		unmap_ctx->desc = desc;
		unmap_ctx->ch = ch;
		unmap_ctx->bdev = bdev;

		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
	} else {
		unmap_ctx->count--;	/* dequeued */
	}

	dsm_range = (struct spdk_nvme_dsm_range *)req->data;
	for (i = unmap_ctx->range_index; i < nr; i++) {
		lba = dsm_range[i].starting_lba;
		lba_count = dsm_range[i].length;

		unmap_ctx->count++;

		rc = spdk_bdev_unmap_blocks(desc, ch, lba, lba_count,
					    nvmf_bdev_ctrlr_unmap_cpl, unmap_ctx);
		if (rc) {
			if (rc == -ENOMEM) {
				nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx);
				/* Unmap was not yet submitted to bdev */
				/* unmap_ctx->count will be decremented when the request is dequeued */
				return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
			}
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			unmap_ctx->count--;
			/* We can't return here - we may have to wait for any other
			 * unmaps already sent to complete */
			break;
		}
		unmap_ctx->range_index++;
	}

	if (unmap_ctx->count == 0) {
		free(unmap_ctx);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
spdk_nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint32_t attribute;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	attribute = cmd->cdw11 & 0x00000007;
	if (attribute & SPDK_NVME_DSM_ATTR_DEALLOCATE) {
		return nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, NULL);
	}

	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

int
spdk_nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	int rc;

	rc = spdk_bdev_nvme_io_passthru(desc, ch, &req->cmd->nvme_cmd, req->data, req->length,
					nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

bool
spdk_nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
				 struct spdk_dif_ctx *dif_ctx)
{
	uint32_t init_ref_tag, dif_check_flags = 0;
	int rc;

	if (spdk_bdev_get_md_size(bdev) == 0) {
		return false;
	}

	/* Initial Reference Tag is the lower 32 bits of the start LBA. */
	init_ref_tag = (uint32_t)from_le64(&cmd->cdw10);

	if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_REFTAG)) {
		dif_check_flags |= SPDK_DIF_FLAGS_REFTAG_CHECK;
	}

	if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_GUARD)) {
		dif_check_flags |= SPDK_DIF_FLAGS_GUARD_CHECK;
	}

	rc = spdk_dif_ctx_init(dif_ctx,
			       spdk_bdev_get_block_size(bdev),
			       spdk_bdev_get_md_size(bdev),
			       spdk_bdev_is_md_interleaved(bdev),
			       spdk_bdev_is_dif_head_of_md(bdev),
			       spdk_bdev_get_dif_type(bdev),
			       dif_check_flags,
			       init_ref_tag, 0, 0, 0, 0);

	return (rc == 0) ? true : false;
}