/*-
 * BSD LICENSE
 *
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "subsystem.h"
#include "session.h"
#include "request.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/io_channel.h"
#include "spdk/nvme.h"
#include "spdk/nvmf_spec.h"
#include "spdk/trace.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"

#include "spdk_internal/log.h"

#define MODEL_NUMBER "SPDK bdev Controller"
#define FW_VERSION "FFFFFFFF"

/* read command dword 12 */
struct __attribute__((packed)) nvme_read_cdw12 {
	uint16_t	nlb;		/* number of logical blocks */
	uint16_t	rsvd : 10;
	uint8_t		prinfo : 4;	/* protection information field */
	uint8_t		fua : 1;	/* force unit access */
	uint8_t		lr : 1;		/* limited retry */
};
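
/*
 * Illustrative decode of dword 12 (a sketch; nothing in it is referenced by
 * this file). NLB is zero-based, so a Read with cdw12 == 0x00000007 requests
 * nlb + 1 = 8 logical blocks:
 *
 *	struct nvme_read_cdw12 *cdw12 = (struct nvme_read_cdw12 *)&cmd->cdw12;
 *	uint64_t blocks = (uint64_t)cdw12->nlb + 1;
 */
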
static void nvmf_bdev_set_dsm(struct spdk_nvmf_session *session)
{
	uint32_t i;

	for (i = 0; i < session->subsys->dev.max_nsid; i++) {
		struct spdk_bdev *bdev = session->subsys->dev.ns_list[i];

		if (bdev == NULL) {
			continue;
		}

		if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP)) {
			SPDK_TRACELOG(SPDK_TRACE_NVMF,
				      "Subsystem %u Namespace %s does not support unmap - not enabling DSM\n",
				      i, spdk_bdev_get_name(bdev));
			return;
		}
	}

	SPDK_TRACELOG(SPDK_TRACE_NVMF, "All devices in Subsystem %s support unmap - enabling DSM\n",
		      spdk_nvmf_subsystem_get_nqn(session->subsys));
	session->vcdata.oncs.dsm = 1;
}

static void
nvmf_bdev_ctrlr_get_data(struct spdk_nvmf_session *session)
{
	struct spdk_nvmf_subsystem *subsys = session->subsys;

	memset(&session->vcdata, 0, sizeof(struct spdk_nvme_ctrlr_data));
	spdk_strcpy_pad(session->vcdata.fr, FW_VERSION, sizeof(session->vcdata.fr), ' ');
	spdk_strcpy_pad(session->vcdata.mn, MODEL_NUMBER, sizeof(session->vcdata.mn), ' ');
	spdk_strcpy_pad(session->vcdata.sn, spdk_nvmf_subsystem_get_sn(subsys),
			sizeof(session->vcdata.sn), ' ');
	session->vcdata.rab = 6;
	session->vcdata.ver.bits.mjr = 1;
	session->vcdata.ver.bits.mnr = 2;
	session->vcdata.ver.bits.ter = 1;
	session->vcdata.ctratt.host_id_exhid_supported = 1;
	session->vcdata.aerl = 0;
	session->vcdata.frmw.slot1_ro = 1;
	session->vcdata.frmw.num_slots = 1;
	session->vcdata.lpa.edlp = 1;
	session->vcdata.elpe = 127;
	session->vcdata.sqes.min = 0x06;
	session->vcdata.sqes.max = 0x06;
	session->vcdata.cqes.min = 0x04;
	session->vcdata.cqes.max = 0x04;
	session->vcdata.maxcmd = 1024;
	session->vcdata.nn = subsys->dev.max_nsid;
	session->vcdata.vwc.present = 1;
	session->vcdata.sgls.supported = 1;
	strncpy(session->vcdata.subnqn, session->subsys->subnqn, sizeof(session->vcdata.subnqn));
	nvmf_bdev_set_dsm(session);
}

static void
nvmf_bdev_ctrlr_poll_for_completions(struct spdk_nvmf_subsystem *subsystem)
{
	return;
}

static void
nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
			     void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int sc, sct;

	spdk_bdev_io_get_nvme_status(bdev_io, &sc, &sct);
	response->status.sc = sc;
	response->status.sct = sct;

	spdk_nvmf_request_complete(req);
	spdk_bdev_free_io(bdev_io);
}

static int
nvmf_bdev_ctrlr_get_log_page(struct spdk_nvmf_request *req)
{
	uint8_t lid;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	uint64_t log_page_offset;

	if (req->data == NULL) {
		SPDK_ERRLOG("get log command with no buffer\n");
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	memset(req->data, 0, req->length);

	log_page_offset = (uint64_t)cmd->cdw12 | ((uint64_t)cmd->cdw13 << 32);
	if (log_page_offset & 3) {
		SPDK_ERRLOG("Invalid log page offset 0x%" PRIx64 "\n", log_page_offset);
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	lid = cmd->cdw10 & 0xFF;
	switch (lid) {
	case SPDK_NVME_LOG_ERROR:
	case SPDK_NVME_LOG_HEALTH_INFORMATION:
	case SPDK_NVME_LOG_FIRMWARE_SLOT:
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	default:
		SPDK_ERRLOG("Unsupported Get Log Page 0x%02X\n", lid);
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVME_SC_INVALID_LOG_PAGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}

static int
identify_ns(struct spdk_nvmf_subsystem *subsystem,
	    struct spdk_nvme_cmd *cmd,
	    struct spdk_nvme_cpl *rsp,
	    struct spdk_nvme_ns_data *nsdata)
{
	struct spdk_bdev *bdev;
	uint64_t num_blocks;

	if (cmd->nsid > subsystem->dev.max_nsid || cmd->nsid == 0) {
		SPDK_ERRLOG("Identify Namespace for invalid NSID %u\n", cmd->nsid);
		rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	bdev = subsystem->dev.ns_list[cmd->nsid - 1];

	if (bdev == NULL) {
		memset(nsdata, 0, sizeof(*nsdata));
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	num_blocks = spdk_bdev_get_num_blocks(bdev);

	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_block_size(bdev));

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
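
/*
 * Worked example for the LBA format reported above: lbads holds log2 of the
 * block size, so a 512-byte-block bdev reports lbads = 9 and a
 * 4096-byte-block bdev reports lbads = 12, since spdk_u32log2(4096) == 12.
 */
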
static int
identify_ctrlr(struct spdk_nvmf_session *session, struct spdk_nvme_ctrlr_data *cdata)
{
	*cdata = session->vcdata;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
identify_active_ns_list(struct spdk_nvmf_subsystem *subsystem,
			struct spdk_nvme_cmd *cmd,
			struct spdk_nvme_cpl *rsp,
			struct spdk_nvme_ns_list *ns_list)
{
	uint32_t i, num_ns, count = 0;

	if (cmd->nsid >= 0xfffffffeUL) {
		SPDK_ERRLOG("Identify Active Namespace List with invalid NSID %u\n", cmd->nsid);
		rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	num_ns = subsystem->dev.max_nsid;

	for (i = 1; i <= num_ns; i++) {
		if (i <= cmd->nsid) {
			continue;
		}
		if (subsystem->dev.ns_list[i - 1] == NULL) {
			continue;
		}
		ns_list->ns_list[count++] = i;
		if (count == SPDK_COUNTOF(ns_list->ns_list)) {
			break;
		}
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
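
/*
 * Worked example: with namespaces 1 and 3 active, Identify Active Namespace
 * List with cmd->nsid == 0 fills [1, 3, 0, ...]; with cmd->nsid == 1 it
 * fills [3, 0, ...], because only NSIDs strictly greater than the one in the
 * command are reported.
 */
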
static int
nvmf_bdev_ctrlr_identify(struct spdk_nvmf_request *req)
{
	uint8_t cns;
	struct spdk_nvmf_session *session = req->conn->sess;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_subsystem *subsystem = session->subsys;

	if (req->data == NULL || req->length < 4096) {
		SPDK_ERRLOG("identify command with invalid buffer\n");
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	memset(req->data, 0, req->length);

	cns = cmd->cdw10 & 0xFF;
	switch (cns) {
	case SPDK_NVME_IDENTIFY_NS:
		return identify_ns(subsystem, cmd, rsp, req->data);
	case SPDK_NVME_IDENTIFY_CTRLR:
		return identify_ctrlr(session, req->data);
	case SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST:
		return identify_active_ns_list(subsystem, cmd, rsp, req->data);
	default:
		SPDK_ERRLOG("Identify command with unsupported CNS 0x%02x\n", cns);
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}

static int
nvmf_bdev_ctrlr_abort(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_session *session = req->conn->sess;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	uint32_t cdw10 = cmd->cdw10;
	uint16_t cid = cdw10 >> 16;
	uint16_t sqid = cdw10 & 0xFFFFu;
	struct spdk_nvmf_conn *conn;
	struct spdk_nvmf_request *req_to_abort;

	SPDK_TRACELOG(SPDK_TRACE_NVMF, "abort sqid=%u cid=%u\n", sqid, cid);

	rsp->cdw0 = 1; /* Command not aborted */

	conn = spdk_nvmf_session_get_conn(session, sqid);
	if (conn == NULL) {
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "sqid %u not found\n", sqid);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/*
	 * NOTE: This relies on the assumption that all connections for a session will be handled
	 * on the same thread. If this assumption becomes untrue, this will need to pass a message
	 * to the thread handling conn, and the abort will need to be asynchronous.
	 */
	req_to_abort = spdk_nvmf_conn_get_request(conn, cid);
	if (req_to_abort == NULL) {
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "cid %u not found\n", cid);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_nvmf_request_abort(req_to_abort) == 0) {
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "abort session=%p req=%p sqid=%u cid=%u successful\n",
			      session, req_to_abort, sqid, cid);
		rsp->cdw0 = 0; /* Command successfully aborted */
	}
	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_bdev_ctrlr_get_features(struct spdk_nvmf_request *req)
{
	uint8_t feature;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	feature = cmd->cdw10 & 0xff; /* the FID is the low byte of CDW10 */
	switch (feature) {
	case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
		return spdk_nvmf_session_get_features_number_of_queues(req);
	case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE:
		response->cdw0 = 1; /* volatile write cache enabled, matching vcdata.vwc.present */
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
		return spdk_nvmf_session_get_features_keep_alive_timer(req);
	case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
		return spdk_nvmf_session_get_features_async_event_configuration(req);
	case SPDK_NVME_FEAT_HOST_IDENTIFIER:
		return spdk_nvmf_session_get_features_host_identifier(req);
	default:
		SPDK_ERRLOG("Get Features command with unsupported feature ID 0x%02x\n", feature);
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}

static int
nvmf_bdev_ctrlr_set_features(struct spdk_nvmf_request *req)
{
	uint8_t feature;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	feature = cmd->cdw10 & 0xff; /* the FID is the low byte of CDW10 */
	switch (feature) {
	case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
		return spdk_nvmf_session_set_features_number_of_queues(req);
	case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
		return spdk_nvmf_session_set_features_keep_alive_timer(req);
	case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
		return spdk_nvmf_session_set_features_async_event_configuration(req);
	case SPDK_NVME_FEAT_HOST_IDENTIFIER:
		return spdk_nvmf_session_set_features_host_identifier(req);
	default:
		SPDK_ERRLOG("Set Features command with unsupported feature ID 0x%02x\n", feature);
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}

static int
nvmf_bdev_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	/* pre-set response details for this command */
	response->status.sc = SPDK_NVME_SC_SUCCESS;

	switch (cmd->opc) {
	case SPDK_NVME_OPC_GET_LOG_PAGE:
		return nvmf_bdev_ctrlr_get_log_page(req);
	case SPDK_NVME_OPC_IDENTIFY:
		return nvmf_bdev_ctrlr_identify(req);
	case SPDK_NVME_OPC_ABORT:
		return nvmf_bdev_ctrlr_abort(req);
	case SPDK_NVME_OPC_GET_FEATURES:
		return nvmf_bdev_ctrlr_get_features(req);
	case SPDK_NVME_OPC_SET_FEATURES:
		return nvmf_bdev_ctrlr_set_features(req);
	case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST:
		return spdk_nvmf_session_async_event_request(req);
	case SPDK_NVME_OPC_KEEP_ALIVE:
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "Keep Alive\n");
		/*
		 * To handle keep alive just clear or reset the
		 * session based keep alive duration counter.
		 * When added, a separate timer based process
		 * will monitor if the time since last recorded
		 * keep alive has exceeded the max duration and
		 * take appropriate action.
		 */
		/* session->keep_alive_timestamp = ...; */
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;

	case SPDK_NVME_OPC_CREATE_IO_SQ:
	case SPDK_NVME_OPC_CREATE_IO_CQ:
	case SPDK_NVME_OPC_DELETE_IO_SQ:
	case SPDK_NVME_OPC_DELETE_IO_CQ:
		SPDK_ERRLOG("Admin opc 0x%02X not allowed in NVMf\n", cmd->opc);
		response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	default:
		SPDK_ERRLOG("Unsupported admin command\n");
		response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}
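
/*
 * The Create/Delete I/O SQ/CQ opcodes are rejected above because NVMe over
 * Fabrics creates one queue pair implicitly per Fabrics Connect command, so
 * queue management via admin commands does not apply to this target.
 */
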
static int
nvmf_bdev_ctrlr_rw_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		       struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t lba_address;
	uint64_t blockcnt;
	uint64_t io_bytes;
	uint64_t offset;
	uint64_t llen;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct nvme_read_cdw12 *cdw12 = (struct nvme_read_cdw12 *)&cmd->cdw12;

	blockcnt = spdk_bdev_get_num_blocks(bdev);
	lba_address = cmd->cdw11;
	lba_address = (lba_address << 32) + cmd->cdw10;
	offset = lba_address * block_size;
	llen = cdw12->nlb + 1; /* NLB is zero-based */

	if (lba_address >= blockcnt || llen > blockcnt || lba_address > (blockcnt - llen)) {
		SPDK_ERRLOG("end of media\n");
		response->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	io_bytes = llen * block_size;
	if (io_bytes > req->length) {
		SPDK_ERRLOG("Read/Write NLB > SGL length\n");
		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (cmd->opc == SPDK_NVME_OPC_READ) {
		spdk_trace_record(TRACE_NVMF_LIB_READ_START, 0, 0, (uint64_t)req, 0);
		/* transfer exactly the NLB-specified byte count, not the full SGL length */
		if (spdk_bdev_read(desc, ch, req->data, offset, io_bytes, nvmf_bdev_ctrlr_complete_cmd,
				   req)) {
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
	} else {
		spdk_trace_record(TRACE_NVMF_LIB_WRITE_START, 0, 0, (uint64_t)req, 0);
		if (spdk_bdev_write(desc, ch, req->data, offset, io_bytes, nvmf_bdev_ctrlr_complete_cmd,
				    req)) {
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
	}
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

static int
nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			  struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t nbytes;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	nbytes = spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev);
	if (spdk_bdev_flush(desc, ch, 0, nbytes, nvmf_bdev_ctrlr_complete_cmd, req)) {
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
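
/*
 * Worked example for the end-of-media check in nvmf_bdev_ctrlr_rw_cmd: on a
 * bdev with blockcnt == 1000, lba_address == 999 with llen == 2 is rejected
 * (999 > 1000 - 2), while lba_address == 998 with llen == 2 touches blocks
 * 998-999 and is accepted.
 */
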
struct nvmf_virtual_ctrlr_unmap {
	struct spdk_nvmf_request	*req;
	uint32_t			count;
};

static void
nvmf_virtual_ctrlr_dsm_cpl(struct spdk_bdev_io *bdev_io, bool success,
			   void *cb_arg)
{
	struct nvmf_virtual_ctrlr_unmap *unmap_ctx = cb_arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int sc, sct;

	unmap_ctx->count--;

	/* Record the first error; don't overwrite it with later completions. */
	if (response->status.sct == SPDK_NVME_SCT_GENERIC &&
	    response->status.sc == SPDK_NVME_SC_SUCCESS) {
		spdk_bdev_io_get_nvme_status(bdev_io, &sc, &sct);
		response->status.sc = sc;
		response->status.sct = sct;
	}

	if (unmap_ctx->count == 0) {
		spdk_nvmf_request_complete(req);
		free(unmap_ctx);
	}
	/* Each child I/O must be freed, not just the last one to complete. */
	spdk_bdev_free_io(bdev_io);
}

static int
nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint32_t attribute;
	uint16_t nr, i;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	nr = ((cmd->cdw10 & 0x000000ff) + 1);
	if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) {
		SPDK_ERRLOG("Dataset Management number of ranges > SGL length\n");
		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	attribute = cmd->cdw11 & 0x00000007;
	if (attribute & SPDK_NVME_DSM_ATTR_DEALLOCATE) {
		struct nvmf_virtual_ctrlr_unmap *unmap_ctx;
		struct spdk_nvme_dsm_range *dsm_range;
		uint64_t lba;
		uint32_t lba_count;
		uint32_t block_size = spdk_bdev_get_block_size(bdev);

		unmap_ctx = calloc(1, sizeof(*unmap_ctx));
		if (!unmap_ctx) {
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		unmap_ctx->req = req;

		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;

		dsm_range = (struct spdk_nvme_dsm_range *)req->data;
		for (i = 0; i < nr; i++) {
			lba = dsm_range[i].starting_lba;
			lba_count = dsm_range[i].length;

			unmap_ctx->count++;

			if (spdk_bdev_unmap(desc, ch, lba * block_size, lba_count * block_size,
					    nvmf_virtual_ctrlr_dsm_cpl, unmap_ctx)) {
				response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
				unmap_ctx->count--;
				/* We can't return here - we may have to wait for any other
				 * unmaps already sent to complete */
				break;
			}
		}

		if (unmap_ctx->count == 0) {
			free(unmap_ctx);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	}

	response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	if (spdk_bdev_nvme_io_passthru(desc, ch, &req->cmd->nvme_cmd, req->data, req->length,
				       nvmf_bdev_ctrlr_complete_cmd, req)) {
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
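
/*
 * Worked example for the deallocate path above: on a 512-byte-block bdev, a
 * single range {starting_lba = 8, length = 16} becomes
 * spdk_bdev_unmap(desc, ch, 8 * 512, 16 * 512, ...), i.e. bytes 4096 through
 * 12287, with unmap_ctx->count tracking the one outstanding unmap.
 */
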
static int
nvmf_bdev_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
{
	uint32_t nsid;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_subsystem *subsystem = req->conn->sess->subsys;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	/* pre-set response details for this command */
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	nsid = cmd->nsid;

	if (nsid > subsystem->dev.max_nsid || nsid == 0) {
		SPDK_ERRLOG("Unsuccessful query for nsid %u\n", cmd->nsid);
		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	bdev = subsystem->dev.ns_list[nsid - 1];
	if (bdev == NULL) {
		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	desc = subsystem->dev.desc[nsid - 1];
	ch = subsystem->dev.ch[nsid - 1];
	switch (cmd->opc) {
	case SPDK_NVME_OPC_READ:
	case SPDK_NVME_OPC_WRITE:
		return nvmf_bdev_ctrlr_rw_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_FLUSH:
		return nvmf_bdev_ctrlr_flush_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_DATASET_MANAGEMENT:
		return nvmf_bdev_ctrlr_dsm_cmd(bdev, desc, ch, req);
	default:
		return nvmf_bdev_ctrlr_nvme_passthru_io(bdev, desc, ch, req);
	}
}

static int
nvmf_bdev_ctrlr_attach(struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_bdev *bdev;
	struct spdk_io_channel *ch;
	uint32_t i;

	for (i = 0; i < subsystem->dev.max_nsid; i++) {
		bdev = subsystem->dev.ns_list[i];
		if (bdev == NULL) {
			continue;
		}

		ch = spdk_bdev_get_io_channel(subsystem->dev.desc[i]);
		if (ch == NULL) {
			SPDK_ERRLOG("io_channel allocation failed\n");
			return -1;
		}
		subsystem->dev.ch[i] = ch;
	}

	return 0;
}

static void
nvmf_bdev_ctrlr_detach(struct spdk_nvmf_subsystem *subsystem)
{
	uint32_t i;

	for (i = 0; i < subsystem->dev.max_nsid; i++) {
		if (subsystem->dev.ns_list[i]) {
			spdk_put_io_channel(subsystem->dev.ch[i]);
			spdk_bdev_close(subsystem->dev.desc[i]);
			subsystem->dev.ch[i] = NULL;
			subsystem->dev.ns_list[i] = NULL;
		}
	}
	subsystem->dev.max_nsid = 0;
}

const struct spdk_nvmf_ctrlr_ops spdk_nvmf_bdev_ctrlr_ops = {
	.attach = nvmf_bdev_ctrlr_attach,
	.ctrlr_get_data = nvmf_bdev_ctrlr_get_data,
	.process_admin_cmd = nvmf_bdev_ctrlr_process_admin_cmd,
	.process_io_cmd = nvmf_bdev_ctrlr_process_io_cmd,
	.poll_for_completions = nvmf_bdev_ctrlr_poll_for_completions,
	.detach = nvmf_bdev_ctrlr_detach,
};
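
/*
 * A minimal sketch of how a caller might drive this ops table; the dispatch
 * code lives outside this file, and the `ops` field name on the subsystem is
 * an assumption for illustration:
 *
 *	subsystem->ops = &spdk_nvmf_bdev_ctrlr_ops;
 *	rc = subsystem->ops->process_io_cmd(req);
 *	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
 *		spdk_nvmf_request_complete(req);
 *	}
 */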