/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/likely.h"
#include "spdk/nvme.h"
#include "spdk/nvmf_spec.h"
#include "spdk/trace.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"

#include "spdk_internal/log.h"

static bool
spdk_nvmf_subsystem_bdev_io_type_supported(struct spdk_nvmf_subsystem *subsystem,
		enum spdk_bdev_io_type io_type)
{
	struct spdk_nvmf_ns *ns;

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		if (ns->bdev == NULL) {
			continue;
		}

		if (!spdk_bdev_io_type_supported(ns->bdev, io_type)) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF,
				      "Subsystem %s namespace %u (%s) does not support io_type %d\n",
				      spdk_nvmf_subsystem_get_nqn(subsystem),
				      ns->opts.nsid, spdk_bdev_get_name(ns->bdev), (int)io_type);
			return false;
		}
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "All devices in Subsystem %s support io_type %d\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem), (int)io_type);
	return true;
}

bool
spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return spdk_nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_UNMAP);
}

bool
spdk_nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return spdk_nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys,
			SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
}
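
/*
 * Common completion callback for bdev I/O submitted on behalf of an NVMe-oF
 * command: translate the bdev result into an NVMe status code, complete the
 * NVMe-oF request, and release the bdev I/O.
 */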
static void
nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
			     void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int sc, sct;

	spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
	response->status.sc = sc;
	response->status.sct = sct;

	spdk_nvmf_request_complete(req);
	spdk_bdev_free_io(bdev_io);
}

void
spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata)
{
	struct spdk_bdev *bdev = ns->bdev;
	uint64_t num_blocks;

	num_blocks = spdk_bdev_get_num_blocks(bdev);

	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].ms = spdk_bdev_get_md_size(bdev);
	nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_block_size(bdev));
	if (nsdata->lbaf[0].ms != 0) {
		nsdata->flbas.extended = 1;
		nsdata->mc.extended = 1;
		nsdata->mc.pointer = 0;
		nsdata->dps.md_start = spdk_bdev_is_dif_head_of_md(bdev);

		switch (spdk_bdev_get_dif_type(bdev)) {
		case SPDK_DIF_TYPE1:
			nsdata->dpc.pit1 = 1;
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE1;
			break;
		case SPDK_DIF_TYPE2:
			nsdata->dpc.pit2 = 1;
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE2;
			break;
		case SPDK_DIF_TYPE3:
			nsdata->dpc.pit3 = 1;
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE3;
			break;
		default:
			SPDK_ERRLOG("Unknown DIF type: %d\n", spdk_bdev_get_dif_type(bdev));
			assert(false);
			break;
		}
	}
	nsdata->noiob = spdk_bdev_get_optimal_io_boundary(bdev);
	nsdata->nmic.can_share = 1;
	nsdata->nsrescap.rescap.persist = 0; /* TODO: not supported for now */
	nsdata->nsrescap.rescap.write_exclusive = 1;
	nsdata->nsrescap.rescap.exclusive_access = 1;
	nsdata->nsrescap.rescap.write_exclusive_reg_only = 1;
	nsdata->nsrescap.rescap.exclusive_access_reg_only = 1;
	nsdata->nsrescap.rescap.write_exclusive_all_reg = 1;
	nsdata->nsrescap.rescap.exclusive_access_all_reg = 1;
	nsdata->nsrescap.rescap.ignore_existing_key = 1;

	SPDK_STATIC_ASSERT(sizeof(nsdata->nguid) == sizeof(ns->opts.nguid), "size mismatch");
	memcpy(nsdata->nguid, ns->opts.nguid, sizeof(nsdata->nguid));

	SPDK_STATIC_ASSERT(sizeof(nsdata->eui64) == sizeof(ns->opts.eui64), "size mismatch");
	memcpy(&nsdata->eui64, ns->opts.eui64, sizeof(nsdata->eui64));
}

static void
nvmf_bdev_ctrlr_get_rw_params(const struct spdk_nvme_cmd *cmd, uint64_t *start_lba,
			      uint64_t *num_blocks)
{
	/* SLBA: CDW10 and CDW11 */
	*start_lba = from_le64(&cmd->cdw10);

	/* NLB: CDW12 bits 15:00, 0's based */
	*num_blocks = (from_le32(&cmd->cdw12) & 0xFFFFu) + 1;
}

static bool
nvmf_bdev_ctrlr_lba_in_range(uint64_t bdev_num_blocks, uint64_t io_start_lba,
			     uint64_t io_num_blocks)
{
	if (io_start_lba + io_num_blocks > bdev_num_blocks ||
	    io_start_lba + io_num_blocks < io_start_lba) {
		return false;
	}

	return true;
}

static void
spdk_nvmf_ctrlr_process_io_cmd_resubmit(void *arg)
{
	struct spdk_nvmf_request *req = arg;

	spdk_nvmf_ctrlr_process_io_cmd(req);
}

static void
nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn, void *cb_arg)
{
	int rc;

	req->bdev_io_wait.bdev = bdev;
	req->bdev_io_wait.cb_fn = cb_fn;
	req->bdev_io_wait.cb_arg = cb_arg;

	rc = spdk_bdev_queue_io_wait(bdev, ch, &req->bdev_io_wait);
	if (rc != 0) {
		assert(false);
	}
}
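
/*
 * Read command handler. The read and write paths below share one pattern:
 * decode SLBA/NLB from the command, validate the range against the bdev size
 * and the SGL length, submit the bdev I/O, and on -ENOMEM queue the request
 * to be resubmitted once a bdev I/O becomes available.
 */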
int
spdk_nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_readv_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
spdk_nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			       struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_writev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				     nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
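
/*
 * Write Zeroes handler. The range check mirrors the read/write paths, but
 * the command carries no data buffer, so there is no SGL length check.
 */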
int
spdk_nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ch, start_lba, num_blocks,
					   nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
spdk_nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			       struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int rc;

	/* For the NVMe-oF controller, SPDK always sets the volatile write
	 * cache bit to 1, so return success for block devices that do not
	 * support the FLUSH command.
	 */
	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_flush_blocks(desc, ch, 0, spdk_bdev_get_num_blocks(bdev),
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

struct nvmf_bdev_ctrlr_unmap {
	struct spdk_nvmf_request *req;
	uint32_t count;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev *bdev;
	struct spdk_io_channel *ch;
};

static void
nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success,
			  void *cb_arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = cb_arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int sc, sct;

	unmap_ctx->count--;

	if (response->status.sct == SPDK_NVME_SCT_GENERIC &&
	    response->status.sc == SPDK_NVME_SC_SUCCESS) {
		spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
		response->status.sc = sc;
		response->status.sct = sct;
	}

	if (unmap_ctx->count == 0) {
		spdk_nvmf_request_complete(req);
		free(unmap_ctx);
	}
	spdk_bdev_free_io(bdev_io);
}
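
/*
 * Dataset Management (deallocate) is fanned out as one bdev unmap per DSM
 * range. unmap_ctx->count tracks the outstanding unmaps; the request is
 * completed and the context freed only when the count drops back to zero.
 * On -ENOMEM the request is queued and resubmitted, and the loop resumes at
 * index unmap_ctx->count.
 */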
static int
nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx);

static void
nvmf_bdev_ctrlr_unmap_resubmit(void *arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_bdev_desc *desc = unmap_ctx->desc;
	struct spdk_bdev *bdev = unmap_ctx->bdev;
	struct spdk_io_channel *ch = unmap_ctx->ch;

	nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, unmap_ctx);
}

static int
nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx)
{
	uint16_t nr, i;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvme_dsm_range *dsm_range;
	uint64_t lba;
	uint32_t lba_count;
	int rc;

	nr = ((cmd->cdw10 & 0x000000ff) + 1);
	if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) {
		SPDK_ERRLOG("Dataset Management number of ranges > SGL length\n");
		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (unmap_ctx == NULL) {
		unmap_ctx = calloc(1, sizeof(*unmap_ctx));
		if (!unmap_ctx) {
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		unmap_ctx->req = req;
		unmap_ctx->desc = desc;
		unmap_ctx->ch = ch;
	}

	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_SUCCESS;

	dsm_range = (struct spdk_nvme_dsm_range *)req->data;
	for (i = unmap_ctx->count; i < nr; i++) {
		lba = dsm_range[i].starting_lba;
		lba_count = dsm_range[i].length;

		unmap_ctx->count++;

		rc = spdk_bdev_unmap_blocks(desc, ch, lba, lba_count,
					    nvmf_bdev_ctrlr_unmap_cpl, unmap_ctx);
		if (rc) {
			if (rc == -ENOMEM) {
				nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx);
				/* Unmap was not yet submitted to bdev */
				unmap_ctx->count--;
				return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
			}
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			unmap_ctx->count--;
			/* We can't return here - we may have to wait for any other
			 * unmaps already sent to complete */
			break;
		}
	}

	if (unmap_ctx->count == 0) {
		free(unmap_ctx);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
spdk_nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint32_t attribute;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	attribute = cmd->cdw11 & 0x00000007;
	if (attribute & SPDK_NVME_DSM_ATTR_DEALLOCATE) {
		return nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, NULL);
	}

	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
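
/*
 * Pass any other NVMe I/O command through to the bdev as-is. If the bdev
 * module cannot handle it, fail the command with Invalid Opcode.
 */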
int
spdk_nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	int rc;

	rc = spdk_bdev_nvme_io_passthru(desc, ch, &req->cmd->nvme_cmd, req->data, req->length,
					nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}