/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"
#include "transport.h"

#include "spdk/bit_array.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/trace.h"
#include "spdk/nvme_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/version.h"

#include "spdk_internal/log.h"

#define MIN_KEEP_ALIVE_TIMEOUT_IN_MS 10000
#define NVMF_DISC_KATO_IN_MS 120000
#define KAS_TIME_UNIT_IN_MS 100
#define KAS_DEFAULT_VALUE (MIN_KEEP_ALIVE_TIMEOUT_IN_MS / KAS_TIME_UNIT_IN_MS)

/*
 * Report the SPDK version as the firmware revision.
 * SPDK_VERSION_STRING won't fit into FR (only 8 bytes), so try to fit the most important parts.
 */
#define FW_VERSION SPDK_VERSION_MAJOR_STRING SPDK_VERSION_MINOR_STRING SPDK_VERSION_PATCH_STRING

static inline void
spdk_nvmf_invalid_connect_response(struct spdk_nvmf_fabric_connect_rsp *rsp,
				   uint8_t iattr, uint16_t ipo)
{
	rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
	rsp->status_code_specific.invalid.iattr = iattr;
	rsp->status_code_specific.invalid.ipo = ipo;
}

#define SPDK_NVMF_INVALID_CONNECT_CMD(rsp, field)	\
	spdk_nvmf_invalid_connect_response(rsp, 0, offsetof(struct spdk_nvmf_fabric_connect_cmd, field))
#define SPDK_NVMF_INVALID_CONNECT_DATA(rsp, field)	\
	spdk_nvmf_invalid_connect_response(rsp, 1, offsetof(struct spdk_nvmf_fabric_connect_data, field))

static void
spdk_nvmf_ctrlr_stop_keep_alive_timer(struct spdk_nvmf_ctrlr *ctrlr)
{
	if (!ctrlr) {
		SPDK_ERRLOG("Controller is NULL\n");
		return;
	}

	if (ctrlr->keep_alive_poller == NULL) {
		return;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Stop keep alive poller\n");
	spdk_poller_unregister(&ctrlr->keep_alive_poller);
}

static void
spdk_nvmf_ctrlr_disconnect_qpairs_done(struct spdk_io_channel_iter *i, int status)
{
	if (status == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "ctrlr disconnect qpairs completed successfully\n");
	} else {
		SPDK_ERRLOG("Failed to disconnect ctrlr qpairs\n");
	}
}

static void
spdk_nvmf_ctrlr_disconnect_qpairs_on_pg(struct spdk_io_channel_iter *i)
{
	int rc = 0;
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_qpair *qpair, *temp_qpair;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_poll_group *group;

	ctrlr = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, temp_qpair) {
		if (qpair->ctrlr == ctrlr) {
			rc = spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
			if (rc) {
				SPDK_ERRLOG("Qpair disconnect failed\n");
				goto next_channel;
			}
		}
	}

next_channel:
	spdk_for_each_channel_continue(i, rc);
}

static int
spdk_nvmf_ctrlr_keep_alive_poll(void *ctx)
{
	uint64_t keep_alive_timeout_tick;
	uint64_t now = spdk_get_ticks();
	struct spdk_nvmf_ctrlr *ctrlr = ctx;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Polling ctrlr keep alive timeout\n");

	/* If the Keep alive feature is in use and the timer expires */
	keep_alive_timeout_tick = ctrlr->last_keep_alive_tick +
				  ctrlr->feat.keep_alive_timer.bits.kato * spdk_get_ticks_hz() / UINT64_C(1000);
	if (now > keep_alive_timeout_tick) {
		SPDK_NOTICELOG("Disconnecting host from subsystem %s due to keep alive timeout.\n",
			       ctrlr->subsys->subnqn);
		/* set the Controller Fatal Status bit to '1' */
		if (ctrlr->vcprop.csts.bits.cfs == 0) {
			ctrlr->vcprop.csts.bits.cfs = 1;

			/*
			 * Disconnect qpairs, terminate the transport connection,
			 * destroy the ctrlr, and break the host-to-controller
			 * association: disconnect qpairs with qpair->ctrlr == ctrlr.
			 */
			spdk_for_each_channel(ctrlr->subsys->tgt,
					      spdk_nvmf_ctrlr_disconnect_qpairs_on_pg,
					      ctrlr,
					      spdk_nvmf_ctrlr_disconnect_qpairs_done);
		}
	}

	return 1;
}

static void
spdk_nvmf_ctrlr_start_keep_alive_timer(struct spdk_nvmf_ctrlr *ctrlr)
{
	if (!ctrlr) {
		SPDK_ERRLOG("Controller is NULL\n");
		return;
	}

	/* if cleared to 0 then the Keep Alive Timer is disabled */
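	/*
	 * Note: spdk_poller_register() takes its period in microseconds,
	 * so the millisecond KATO value is multiplied by 1000 below.
	 */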
	if (ctrlr->feat.keep_alive_timer.bits.kato != 0) {
		ctrlr->last_keep_alive_tick = spdk_get_ticks();

		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Ctrlr add keep alive poller\n");
		ctrlr->keep_alive_poller = spdk_poller_register(spdk_nvmf_ctrlr_keep_alive_poll, ctrlr,
					   ctrlr->feat.keep_alive_timer.bits.kato * 1000);
	}
}

static void
ctrlr_add_qpair_and_update_rsp(struct spdk_nvmf_qpair *qpair,
			       struct spdk_nvmf_ctrlr *ctrlr,
			       struct spdk_nvmf_fabric_connect_rsp *rsp)
{
	assert(ctrlr->admin_qpair->group->thread == spdk_get_thread());

	/* check if we would exceed ctrlr connection limit */
	if (qpair->qid >= spdk_bit_array_capacity(ctrlr->qpair_mask)) {
		SPDK_ERRLOG("Requested QID %u but Max QID is %u\n",
			    qpair->qid, spdk_bit_array_capacity(ctrlr->qpair_mask) - 1);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER;
		return;
	}

	if (spdk_bit_array_get(ctrlr->qpair_mask, qpair->qid)) {
		SPDK_ERRLOG("Got I/O connect with duplicate QID %u\n", qpair->qid);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER;
		return;
	}

	qpair->ctrlr = ctrlr;
	spdk_bit_array_set(ctrlr->qpair_mask, qpair->qid);

	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
	rsp->status_code_specific.success.cntlid = ctrlr->cntlid;
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "connect capsule response: cntlid = 0x%04x\n",
		      rsp->status_code_specific.success.cntlid);
}

static void
_spdk_nvmf_request_complete(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;

	spdk_nvmf_request_complete(req);
}

static void
_spdk_nvmf_ctrlr_add_admin_qpair(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	ctrlr->admin_qpair = qpair;
	spdk_nvmf_ctrlr_start_keep_alive_timer(ctrlr);
	ctrlr_add_qpair_and_update_rsp(qpair, ctrlr, rsp);
	spdk_nvmf_request_complete(req);
}

static void
_spdk_nvmf_subsystem_add_ctrlr(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	if (spdk_nvmf_subsystem_add_ctrlr(ctrlr->subsys, ctrlr)) {
		SPDK_ERRLOG("Unable to add controller to subsystem\n");
		spdk_bit_array_free(&ctrlr->qpair_mask);
		free(ctrlr);
		qpair->ctrlr = NULL;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		spdk_thread_send_msg(qpair->group->thread, _spdk_nvmf_request_complete, req);
		return;
	}

	spdk_thread_send_msg(ctrlr->thread, _spdk_nvmf_ctrlr_add_admin_qpair, req);
}

static struct spdk_nvmf_ctrlr *
spdk_nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
		       struct spdk_nvmf_request *req,
		       struct spdk_nvmf_fabric_connect_cmd *connect_cmd,
		       struct spdk_nvmf_fabric_connect_data *connect_data)
{
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_transport *transport;

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		SPDK_ERRLOG("Memory allocation failed\n");
		return NULL;
	}

	TAILQ_INIT(&ctrlr->log_head);
	ctrlr->subsys = subsystem;
	ctrlr->thread = req->qpair->group->thread;

	transport = req->qpair->transport;
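	/*
	 * One bit per queue ID, with the admin queue at QID 0.
	 * ctrlr_add_qpair_and_update_rsp() sets a bit for every connected
	 * qpair, which is how out-of-range and duplicate QIDs are rejected.
	 */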
	ctrlr->qpair_mask = spdk_bit_array_create(transport->opts.max_qpairs_per_ctrlr);
	if (!ctrlr->qpair_mask) {
		SPDK_ERRLOG("Failed to allocate controller qpair mask\n");
		free(ctrlr);
		return NULL;
	}

	/*
	 * KAS: this field indicates the granularity of the Keep Alive Timer in 100ms units.
	 * The host-requested keep-alive timeout is rounded up to a multiple of that
	 * granularity and stored in milliseconds.
	 */
	ctrlr->feat.keep_alive_timer.bits.kato = spdk_divide_round_up(connect_cmd->kato,
			KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS) *
			KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS;
	ctrlr->feat.async_event_configuration.bits.ns_attr_notice = 1;
	ctrlr->feat.volatile_write_cache.bits.wce = 1;

	if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (ctrlr->feat.keep_alive_timer.bits.kato != 0) {
			SPDK_ERRLOG("Discovery controllers don't accept a keep-alive timeout\n");
			spdk_bit_array_free(&ctrlr->qpair_mask);
			free(ctrlr);
			return NULL;
		}

		/*
		 * Discovery controllers use some arbitrary high value in order
		 * to clean up stale discovery sessions.
		 *
		 * From the 1.0a nvme-of spec:
		 * "The Keep Alive command is reserved for
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 *
		 * KATO is in milliseconds.
		 */
		ctrlr->feat.keep_alive_timer.bits.kato = NVMF_DISC_KATO_IN_MS;
	}

	/* Subtract 1 for the admin queue, and 1 more because NCQR/NSQR are 0-based values. */
	ctrlr->feat.number_of_queues.bits.ncqr = transport->opts.max_qpairs_per_ctrlr - 1 - 1;
	ctrlr->feat.number_of_queues.bits.nsqr = transport->opts.max_qpairs_per_ctrlr - 1 - 1;

	spdk_uuid_copy(&ctrlr->hostid, (struct spdk_uuid *)connect_data->hostid);

	ctrlr->vcprop.cap.raw = 0;
	ctrlr->vcprop.cap.bits.cqr = 1; /* NVMe-oF specification required */
	ctrlr->vcprop.cap.bits.mqes = transport->opts.max_queue_depth - 1; /* max queue depth */
	ctrlr->vcprop.cap.bits.ams = 0; /* optional arb mechanisms */
	ctrlr->vcprop.cap.bits.to = 1; /* ready timeout - 500 msec units */
	ctrlr->vcprop.cap.bits.dstrd = 0; /* fixed to 0 for NVMe-oF */
	ctrlr->vcprop.cap.bits.css = SPDK_NVME_CAP_CSS_NVM; /* NVM command set */
	ctrlr->vcprop.cap.bits.mpsmin = 0; /* 2 ^ (12 + mpsmin) == 4k */
	ctrlr->vcprop.cap.bits.mpsmax = 0; /* 2 ^ (12 + mpsmax) == 4k */

	/* Version Supported: 1.3 */
	ctrlr->vcprop.vs.bits.mjr = 1;
	ctrlr->vcprop.vs.bits.mnr = 3;
	ctrlr->vcprop.vs.bits.ter = 0;

	ctrlr->vcprop.cc.raw = 0;
	ctrlr->vcprop.cc.bits.en = 0; /* Init controller disabled */

	ctrlr->vcprop.csts.raw = 0;
	ctrlr->vcprop.csts.bits.rdy = 0; /* Init controller as not ready */

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "cap 0x%" PRIx64 "\n", ctrlr->vcprop.cap.raw);
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "vs 0x%x\n", ctrlr->vcprop.vs.raw);
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "cc 0x%x\n", ctrlr->vcprop.cc.raw);
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "csts 0x%x\n", ctrlr->vcprop.csts.raw);

	req->qpair->ctrlr = ctrlr;
	spdk_thread_send_msg(subsystem->thread, _spdk_nvmf_subsystem_add_ctrlr, req);

	return ctrlr;
}

static void
_spdk_nvmf_ctrlr_destruct(void *ctx)
{
	struct spdk_nvmf_ctrlr *ctrlr = ctx;
	struct spdk_nvmf_reservation_log *log, *log_tmp;

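	/*
	 * Runs on ctrlr->thread (see spdk_nvmf_ctrlr_destruct() below), so the
	 * keep alive poller and the reservation log list can be torn down here
	 * without racing the controller's other work.
	 */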
	spdk_nvmf_ctrlr_stop_keep_alive_timer(ctrlr);

	TAILQ_FOREACH_SAFE(log, &ctrlr->log_head, link, log_tmp) {
		TAILQ_REMOVE(&ctrlr->log_head, log, link);
		free(log);
	}

	free(ctrlr);
}

void
spdk_nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
{
	spdk_nvmf_subsystem_remove_ctrlr(ctrlr->subsys, ctrlr);

	spdk_thread_send_msg(ctrlr->thread, _spdk_nvmf_ctrlr_destruct, ctrlr);
}

static void
spdk_nvmf_ctrlr_add_io_qpair(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	/* Unit tests check qpair->ctrlr after calling spdk_nvmf_ctrlr_connect.
	 * In the error case the value should be NULL, so set it to NULL first.
	 */
	qpair->ctrlr = NULL;

	if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
		SPDK_ERRLOG("I/O connect not allowed on discovery controller\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	if (!ctrlr->vcprop.cc.bits.en) {
		SPDK_ERRLOG("Got I/O connect before ctrlr was enabled\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	if (1u << ctrlr->vcprop.cc.bits.iosqes != sizeof(struct spdk_nvme_cmd)) {
		SPDK_ERRLOG("Got I/O connect with invalid IOSQES %u\n",
			    ctrlr->vcprop.cc.bits.iosqes);
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	if (1u << ctrlr->vcprop.cc.bits.iocqes != sizeof(struct spdk_nvme_cpl)) {
		SPDK_ERRLOG("Got I/O connect with invalid IOCQES %u\n",
			    ctrlr->vcprop.cc.bits.iocqes);
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	ctrlr_add_qpair_and_update_rsp(qpair, ctrlr, rsp);
end:
	spdk_thread_send_msg(qpair->group->thread, _spdk_nvmf_request_complete, req);
}

static void
_spdk_nvmf_ctrlr_add_io_qpair(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_fabric_connect_data *data = req->data;
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_qpair *admin_qpair;
	struct spdk_nvmf_tgt *tgt = qpair->transport->tgt;
	struct spdk_nvmf_subsystem *subsystem;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Connect I/O Queue for controller id 0x%x\n", data->cntlid);

	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, data->subnqn);
	/* We already checked this in spdk_nvmf_ctrlr_connect */
	assert(subsystem != NULL);

	ctrlr = spdk_nvmf_subsystem_get_ctrlr(subsystem, data->cntlid);
	if (ctrlr == NULL) {
		SPDK_ERRLOG("Unknown controller ID 0x%x\n", data->cntlid);
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, cntlid);
		spdk_thread_send_msg(qpair->group->thread, _spdk_nvmf_request_complete, req);
		return;
	}

	admin_qpair = ctrlr->admin_qpair;
	qpair->ctrlr = ctrlr;
	spdk_thread_send_msg(admin_qpair->group->thread, spdk_nvmf_ctrlr_add_io_qpair, req);
}

static int
spdk_nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_data *data = req->data;
	struct spdk_nvmf_fabric_connect_cmd *cmd = &req->cmd->connect_cmd;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_transport *transport = qpair->transport;
	struct spdk_nvmf_tgt *tgt = transport->tgt;
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_subsystem *subsystem;
	const char *subnqn, *hostnqn;
	struct spdk_nvme_transport_id listen_trid = {};
	void *end;

	if (req->length < sizeof(struct spdk_nvmf_fabric_connect_data)) {
		SPDK_ERRLOG("Connect command data length 0x%x too small\n", req->length);
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "recfmt 0x%x qid %u sqsize %u\n",
		      cmd->recfmt, cmd->qid, cmd->sqsize);

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Connect data:\n");
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "  cntlid:  0x%04x\n", data->cntlid);
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "  hostid: %08x-%04x-%04x-%02x%02x-%04x%08x ***\n",
		      ntohl(*(uint32_t *)&data->hostid[0]),
		      ntohs(*(uint16_t *)&data->hostid[4]),
		      ntohs(*(uint16_t *)&data->hostid[6]),
		      data->hostid[8],
		      data->hostid[9],
		      ntohs(*(uint16_t *)&data->hostid[10]),
		      ntohl(*(uint32_t *)&data->hostid[12]));

	if (cmd->recfmt != 0) {
		SPDK_ERRLOG("Connect command unsupported RECFMT %u\n", cmd->recfmt);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* Ensure that subnqn is null terminated */
	end = memchr(data->subnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1);
	if (!end) {
		SPDK_ERRLOG("Connect SUBNQN is not null terminated\n");
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	subnqn = data->subnqn;
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "  subnqn: \"%s\"\n", subnqn);

	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, subnqn);
	if (subsystem == NULL) {
		SPDK_ERRLOG("Could not find subsystem '%s'\n", subnqn);
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if ((subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE) ||
	    (subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSING) ||
	    (subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED) ||
	    (subsystem->state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING)) {
		SPDK_ERRLOG("Subsystem '%s' is not ready\n", subnqn);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_CONTROLLER_BUSY;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* Ensure that hostnqn is null terminated */
	end = memchr(data->hostnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1);
	if (!end) {
		SPDK_ERRLOG("Connect HOSTNQN is not null terminated\n");
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, hostnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	hostnqn = data->hostnqn;
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "  hostnqn: \"%s\"\n", hostnqn);

	if (!spdk_nvmf_subsystem_host_allowed(subsystem, hostnqn)) {
		SPDK_ERRLOG("Subsystem '%s' does not allow host '%s'\n", subnqn, hostnqn);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_HOST;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_nvmf_qpair_get_listen_trid(qpair, &listen_trid)) {
		SPDK_ERRLOG("Subsystem '%s' is unable to enforce access control due to an internal error.\n",
			    subnqn);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_HOST;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
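	/*
	 * Beyond the host whitelist above, also require that the host reached
	 * this subsystem through a listener the subsystem was configured to
	 * accept connections on.
	 */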
	if (!spdk_nvmf_subsystem_listener_allowed(subsystem, &listen_trid)) {
		SPDK_ERRLOG("Subsystem '%s' does not allow host '%s' to connect at this address.\n",
			    subnqn, hostnqn);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_HOST;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/*
	 * SQSIZE is a 0-based value, so it must be at least 1 (minimum queue depth is 2) and
	 * strictly less than max_aq_depth (admin queues) or max_queue_depth (io queues).
	 */
	if (cmd->sqsize == 0) {
		SPDK_ERRLOG("Invalid SQSIZE = 0\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_nvmf_qpair_is_admin_queue(qpair)) {
		if (cmd->sqsize >= transport->opts.max_aq_depth) {
			SPDK_ERRLOG("Invalid SQSIZE for admin queue %u (min 1, max %u)\n",
				    cmd->sqsize, transport->opts.max_aq_depth - 1);
			SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
	} else if (cmd->sqsize >= transport->opts.max_queue_depth) {
		SPDK_ERRLOG("Invalid SQSIZE %u (min 1, max %u)\n",
			    cmd->sqsize, transport->opts.max_queue_depth - 1);
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	qpair->sq_head_max = cmd->sqsize;
	qpair->qid = cmd->qid;

	if (spdk_nvmf_transport_qpair_set_sqsize(qpair)) {
		SPDK_ERRLOG("Cannot set SQSIZE %u for qpair=%p\n", cmd->sqsize, qpair);
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (cmd->qid == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Connect Admin Queue for controller ID 0x%x\n", data->cntlid);

		if (data->cntlid != 0xFFFF) {
			/* This NVMf target only supports dynamic mode. */
			SPDK_ERRLOG("The NVMf target only supports dynamic mode (CNTLID = 0x%x).\n", data->cntlid);
			SPDK_NVMF_INVALID_CONNECT_DATA(rsp, cntlid);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		/* Establish a new ctrlr */
		ctrlr = spdk_nvmf_ctrlr_create(subsystem, req, cmd, data);
		if (!ctrlr) {
			SPDK_ERRLOG("spdk_nvmf_ctrlr_create() failed\n");
			rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		} else {
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
	} else {
		spdk_thread_send_msg(subsystem->thread, _spdk_nvmf_ctrlr_add_io_qpair, req);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	}
}

static uint64_t
nvmf_prop_get_cap(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.cap.raw;
}

static uint64_t
nvmf_prop_get_vs(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.vs.raw;
}

static uint64_t
nvmf_prop_get_cc(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.cc.raw;
}

static bool
nvmf_prop_set_cc(struct spdk_nvmf_ctrlr *ctrlr, uint64_t value)
{
	union spdk_nvme_cc_register cc, diff;

	cc.raw = (uint32_t)value;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "cur CC: 0x%08x\n", ctrlr->vcprop.cc.raw);
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "new CC: 0x%08x\n", cc.raw);

	/*
	 * Calculate which bits changed between the current and new CC.
	 * Mark each bit as 0 once it is handled to determine if any unhandled bits were changed.
	 */
	diff.raw = cc.raw ^ ctrlr->vcprop.cc.raw;

	if (diff.bits.en) {
		if (cc.bits.en) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Property Set CC Enable!\n");
			ctrlr->vcprop.cc.bits.en = 1;
			ctrlr->vcprop.csts.bits.rdy = 1;
		} else {
			SPDK_ERRLOG("CC.EN transition from 1 to 0 (reset) not implemented!\n");
		}
		diff.bits.en = 0;
	}

	if (diff.bits.shn) {
		if (cc.bits.shn == SPDK_NVME_SHN_NORMAL ||
		    cc.bits.shn == SPDK_NVME_SHN_ABRUPT) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Property Set CC Shutdown %u%ub!\n",
				      cc.bits.shn >> 1, cc.bits.shn & 1);
			ctrlr->vcprop.cc.bits.shn = cc.bits.shn;
			ctrlr->vcprop.cc.bits.en = 0;
			ctrlr->vcprop.csts.bits.rdy = 0;
			ctrlr->vcprop.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
		} else if (cc.bits.shn == 0) {
			ctrlr->vcprop.cc.bits.shn = 0;
		} else {
			SPDK_ERRLOG("Prop Set CC: Invalid SHN value %u%ub\n",
				    cc.bits.shn >> 1, cc.bits.shn & 1);
			return false;
		}
		diff.bits.shn = 0;
	}

	if (diff.bits.iosqes) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Prop Set IOSQES = %u (%u bytes)\n",
			      cc.bits.iosqes, 1u << cc.bits.iosqes);
		ctrlr->vcprop.cc.bits.iosqes = cc.bits.iosqes;
		diff.bits.iosqes = 0;
	}

	if (diff.bits.iocqes) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Prop Set IOCQES = %u (%u bytes)\n",
			      cc.bits.iocqes, 1u << cc.bits.iocqes);
		ctrlr->vcprop.cc.bits.iocqes = cc.bits.iocqes;
		diff.bits.iocqes = 0;
	}

	if (diff.raw != 0) {
		SPDK_ERRLOG("Prop Set CC toggled reserved bits 0x%x!\n", diff.raw);
		return false;
	}

	return true;
}

static uint64_t
nvmf_prop_get_csts(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.csts.raw;
}

struct nvmf_prop {
	uint32_t ofst;
	uint8_t size;
	char name[11];
	uint64_t (*get_cb)(struct spdk_nvmf_ctrlr *ctrlr);
	bool (*set_cb)(struct spdk_nvmf_ctrlr *ctrlr, uint64_t value);
};

#define PROP(field, size, get_cb, set_cb) \
	{ \
		offsetof(struct spdk_nvme_registers, field), \
		SPDK_NVMF_PROP_SIZE_##size, \
		#field, \
		get_cb, set_cb \
	}

static const struct nvmf_prop nvmf_props[] = {
	PROP(cap,  8, nvmf_prop_get_cap,  NULL),
	PROP(vs,   4, nvmf_prop_get_vs,   NULL),
	PROP(cc,   4, nvmf_prop_get_cc,   nvmf_prop_set_cc),
	PROP(csts, 4, nvmf_prop_get_csts, NULL),
};

static const struct nvmf_prop *
find_prop(uint32_t ofst)
{
	size_t i;

	for (i = 0; i < SPDK_COUNTOF(nvmf_props); i++) {
		const struct nvmf_prop *prop = &nvmf_props[i];

		if (prop->ofst == ofst) {
			return prop;
		}
	}

	return NULL;
}
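/*
 * For example, a Property Get of OFST 0x14 (the offset of the cc field in
 * struct spdk_nvme_registers) resolves to the "cc" entry above and is
 * answered by nvmf_prop_get_cc(). Offsets with no entry, or entries without
 * a callback for the requested operation, are treated as reserved.
 */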
static int
spdk_nvmf_property_get(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_fabric_prop_get_cmd *cmd = &req->cmd->prop_get_cmd;
	struct spdk_nvmf_fabric_prop_get_rsp *response = &req->rsp->prop_get_rsp;
	const struct nvmf_prop *prop;

	response->status.sc = 0;
	response->value.u64 = 0;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "size %d, offset 0x%x\n",
		      cmd->attrib.size, cmd->ofst);

	if (cmd->attrib.size != SPDK_NVMF_PROP_SIZE_4 &&
	    cmd->attrib.size != SPDK_NVMF_PROP_SIZE_8) {
		SPDK_ERRLOG("Invalid size value %d\n", cmd->attrib.size);
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	prop = find_prop(cmd->ofst);
	if (prop == NULL || prop->get_cb == NULL) {
		/* Reserved properties return 0 when read */
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "name: %s\n", prop->name);
	if (cmd->attrib.size != prop->size) {
		SPDK_ERRLOG("offset 0x%x size mismatch: cmd %u, prop %u\n",
			    cmd->ofst, cmd->attrib.size, prop->size);
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	response->value.u64 = prop->get_cb(ctrlr);
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "response value: 0x%" PRIx64 "\n", response->value.u64);

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_property_set(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_fabric_prop_set_cmd *cmd = &req->cmd->prop_set_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	const struct nvmf_prop *prop;
	uint64_t value;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "size %d, offset 0x%x, value 0x%" PRIx64 "\n",
		      cmd->attrib.size, cmd->ofst, cmd->value.u64);

	prop = find_prop(cmd->ofst);
	if (prop == NULL || prop->set_cb == NULL) {
		SPDK_ERRLOG("Invalid offset 0x%x\n", cmd->ofst);
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "name: %s\n", prop->name);
	if (cmd->attrib.size != prop->size) {
		SPDK_ERRLOG("offset 0x%x size mismatch: cmd %u, prop %u\n",
			    cmd->ofst, cmd->attrib.size, prop->size);
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	value = cmd->value.u64;
	if (prop->size == SPDK_NVMF_PROP_SIZE_4) {
		value = (uint32_t)value;
	}

	if (!prop->set_cb(ctrlr, value)) {
		SPDK_ERRLOG("prop set_cb failed\n");
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_set_features_arbitration(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Arbitration (cdw11 = 0x%0x)\n", cmd->cdw11);

	ctrlr->feat.arbitration.raw = cmd->cdw11;
	ctrlr->feat.arbitration.bits.reserved = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_set_features_power_management(struct spdk_nvmf_request *req)
{
	union spdk_nvme_feat_power_management opts;
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Power Management (cdw11 = 0x%0x)\n", cmd->cdw11);
	opts.raw = cmd->cdw11;

	/* Only PS = 0 is allowed, since we report NPSS = 0 */
	if (opts.bits.ps != 0) {
		SPDK_ERRLOG("Invalid power state %u\n", opts.bits.ps);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ctrlr->feat.power_management.raw = cmd->cdw11;
	ctrlr->feat.power_management.bits.reserved = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static bool
temp_threshold_opts_valid(const union spdk_nvme_feat_temperature_threshold *opts)
{
	/*
	 * Valid TMPSEL values:
	 *  0000b - 1000b: temperature sensors
	 *  1111b: set all implemented temperature sensors
	 */
	if (opts->bits.tmpsel >= 9 && opts->bits.tmpsel != 15) {
		/* 1001b - 1110b: reserved */
		SPDK_ERRLOG("Invalid TMPSEL %u\n", opts->bits.tmpsel);
		return false;
	}

	/*
	 * Valid THSEL values:
	 *  00b: over temperature threshold
	 *  01b: under temperature threshold
	 */
	if (opts->bits.thsel > 1) {
		/* 10b - 11b: reserved */
		SPDK_ERRLOG("Invalid THSEL %u\n", opts->bits.thsel);
		return false;
	}

	return true;
}

static int
spdk_nvmf_ctrlr_set_features_temperature_threshold(struct spdk_nvmf_request *req)
{
	union spdk_nvme_feat_temperature_threshold opts;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Temperature Threshold (cdw11 = 0x%0x)\n", cmd->cdw11);
	opts.raw = cmd->cdw11;

	if (!temp_threshold_opts_valid(&opts)) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* TODO: no sensors implemented - ignore new values */
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_get_features_temperature_threshold(struct spdk_nvmf_request *req)
{
	union spdk_nvme_feat_temperature_threshold opts;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Get Features - Temperature Threshold (cdw11 = 0x%0x)\n", cmd->cdw11);
	opts.raw = cmd->cdw11;

	if (!temp_threshold_opts_valid(&opts)) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* TODO: no sensors implemented - return 0 for all thresholds */
	rsp->cdw0 = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_set_features_error_recovery(struct spdk_nvmf_request *req)
{
	union spdk_nvme_feat_error_recovery opts;
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Error Recovery (cdw11 = 0x%0x)\n", cmd->cdw11);
	opts.raw = cmd->cdw11;

	if (opts.bits.dulbe) {
		/*
		 * Host is not allowed to set this bit, since we don't advertise it in
		 * Identify Namespace.
		 */
		SPDK_ERRLOG("Host set unsupported DULBE bit\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ctrlr->feat.error_recovery.raw = cmd->cdw11;
	ctrlr->feat.error_recovery.bits.reserved = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_set_features_volatile_write_cache(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Volatile Write Cache (cdw11 = 0x%0x)\n", cmd->cdw11);

	ctrlr->feat.volatile_write_cache.raw = cmd->cdw11;
	ctrlr->feat.volatile_write_cache.bits.reserved = 0;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Volatile Write Cache %s\n",
		      ctrlr->feat.volatile_write_cache.bits.wce ? "Enabled" : "Disabled");
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_set_features_write_atomicity(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Write Atomicity (cdw11 = 0x%0x)\n", cmd->cdw11);

	ctrlr->feat.write_atomicity.raw = cmd->cdw11;
	ctrlr->feat.write_atomicity.bits.reserved = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_set_features_host_identifier(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	SPDK_ERRLOG("Set Features - Host Identifier not allowed\n");
	response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_get_features_host_identifier(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	union spdk_nvme_feat_host_identifier opts;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Get Features - Host Identifier\n");

	opts.raw = cmd->cdw11;
	if (!opts.bits.exhid) {
		/* NVMe over Fabrics requires EXHID=1 (128-bit/16-byte host ID) */
		SPDK_ERRLOG("Get Features - Host Identifier with EXHID=0 not allowed\n");
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->data == NULL || req->length < sizeof(ctrlr->hostid)) {
		SPDK_ERRLOG("Invalid data buffer for Get Features - Host Identifier\n");
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	spdk_uuid_copy((struct spdk_uuid *)req->data, &ctrlr->hostid);
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_get_features_reservation_notification_mask(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_ns *ns;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Get Features - Reservation Notification Mask\n");

	if (cmd->nsid == 0xffffffffu) {
		SPDK_ERRLOG("Get Features - Invalid Namespace ID\n");
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ns = _spdk_nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
	if (ns == NULL) {
		SPDK_ERRLOG("Get Features - Invalid Namespace ID\n");
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	rsp->cdw0 = ns->mask;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_set_features_reservation_notification_mask(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_ns *ns;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Reservation Notification Mask\n");

	if (cmd->nsid == 0xffffffffu) {
		for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
		     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
			ns->mask = cmd->cdw11;
		}
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ns = _spdk_nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
	if (ns == NULL) {
		SPDK_ERRLOG("Set Features - Invalid Namespace ID\n");
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	ns->mask = cmd->cdw11;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_get_features_reservation_persistence(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvmf_ns *ns;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Get Features - Reservation Persistence\n");

	ns = _spdk_nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
	/* The broadcast NSID (0xffffffffu) is also rejected by this check */
	if (ns == NULL) {
		SPDK_ERRLOG("Get Features - Invalid Namespace ID\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* TODO: reservation persistence is not supported yet */
	response->cdw0 = 0;

	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_set_features_reservation_persistence(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvmf_ns *ns;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Reservation Persistence\n");

	ns = _spdk_nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
	if (cmd->nsid != 0xffffffffu && ns == NULL) {
		SPDK_ERRLOG("Set Features - Invalid Namespace ID\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* TODO: Feature not changeable for now */
	response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	response->status.sc = SPDK_NVME_SC_FEATURE_NOT_CHANGEABLE;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
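/*
 * Example of the rounding performed below: with the default KAS granularity
 * of 10 seconds (KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS == 10000 ms), a
 * host-requested KATO of 15000 ms is rounded up to 20000 ms.
 */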
static int
spdk_nvmf_ctrlr_set_features_keep_alive_timer(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Keep Alive Timer (%u ms)\n", cmd->cdw11);

	/*
	 * If the host attempts to disable keep alive by setting KATO to 0h,
	 * a status value of Keep Alive Invalid shall be returned.
	 */
	if (cmd->cdw11 == 0) {
		rsp->status.sc = SPDK_NVME_SC_KEEP_ALIVE_INVALID;
	} else if (cmd->cdw11 < MIN_KEEP_ALIVE_TIMEOUT_IN_MS) {
		ctrlr->feat.keep_alive_timer.bits.kato = MIN_KEEP_ALIVE_TIMEOUT_IN_MS;
	} else {
		/* round up to the KATO granularity, in milliseconds */
		ctrlr->feat.keep_alive_timer.bits.kato = spdk_divide_round_up(cmd->cdw11,
				KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS) *
				KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS;
	}

	/*
	 * If the keep alive timeout value was changed successfully,
	 * re-register the keep alive poller with the new period.
	 */
	if (cmd->cdw11 != 0) {
		if (ctrlr->keep_alive_poller != NULL) {
			spdk_poller_unregister(&ctrlr->keep_alive_poller);
		}
		ctrlr->keep_alive_poller = spdk_poller_register(spdk_nvmf_ctrlr_keep_alive_poll, ctrlr,
					   ctrlr->feat.keep_alive_timer.bits.kato * 1000);
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Keep Alive Timer set to %u ms\n",
		      ctrlr->feat.keep_alive_timer.bits.kato);

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_set_features_number_of_queues(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint32_t count;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Number of Queues, cdw11 0x%x\n",
		      req->cmd->nvme_cmd.cdw11);

	count = spdk_bit_array_count_set(ctrlr->qpair_mask);
	/* Number of Queues can only be changed while no I/O queue pairs are connected */
	if (count > 1) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Queue pairs already active!\n");
		rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
	} else {
		/*
		 * Ignore the value requested by the host -
		 * always return the pre-configured value based on max_qpairs_allowed.
		 */
		rsp->cdw0 = ctrlr->feat.number_of_queues.raw;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_set_features_async_event_configuration(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Async Event Configuration, cdw11 0x%08x\n",
		      cmd->cdw11);
	ctrlr->feat.async_event_configuration.raw = cmd->cdw11;
	ctrlr->feat.async_event_configuration.bits.reserved = 0;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_async_event_request(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_subsystem_poll_group *sgroup;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Async Event Request\n");

	/* Only one asynchronous event is supported for now */
	if (ctrlr->aer_req != NULL) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "AERL exceeded\n");
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (ctrlr->notice_event.bits.async_event_type ==
	    SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) {
		rsp->cdw0 = ctrlr->notice_event.raw;
		ctrlr->notice_event.raw = 0;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (ctrlr->reservation_event.bits.async_event_type ==
	    SPDK_NVME_ASYNC_EVENT_TYPE_IO) {
		rsp->cdw0 = ctrlr->reservation_event.raw;
		ctrlr->reservation_event.raw = 0;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* AER cmd is an exception */
	sgroup = &req->qpair->group->sgroups[ctrlr->subsys->id];
	sgroup->io_outstanding--;

	ctrlr->aer_req = req;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

static void
spdk_nvmf_get_firmware_slot_log_page(void *buffer, uint64_t offset, uint32_t length)
{
	struct spdk_nvme_firmware_page fw_page;
	size_t copy_len;

	memset(&fw_page, 0, sizeof(fw_page));
	fw_page.afi.active_slot = 1;
	fw_page.afi.next_reset_slot = 0;
	spdk_strcpy_pad(fw_page.revision[0], FW_VERSION, sizeof(fw_page.revision[0]), ' ');

	if (offset < sizeof(fw_page)) {
		copy_len = spdk_min(sizeof(fw_page) - offset, length);
		if (copy_len > 0) {
			memcpy(buffer, (const char *)&fw_page + offset, copy_len);
		}
	}
}
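/*
 * Record a namespace attribute change for the Changed Namespace List log
 * page. If more distinct namespaces change than the list can hold, the page
 * collapses to a single FFFFFFFFh entry, per the NVMe specification's
 * overflow behavior.
 */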
void
spdk_nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
{
	uint16_t max_changes = SPDK_COUNTOF(ctrlr->changed_ns_list.ns_list);
	uint16_t i;
	bool found = false;

	for (i = 0; i < ctrlr->changed_ns_list_count; i++) {
		if (ctrlr->changed_ns_list.ns_list[i] == nsid) {
			/* nsid is already in the list */
			found = true;
			break;
		}
	}

	if (!found) {
		if (ctrlr->changed_ns_list_count == max_changes) {
			/* Out of space - set first entry to FFFFFFFFh and zero-fill the rest. */
			ctrlr->changed_ns_list.ns_list[0] = 0xFFFFFFFFu;
			for (i = 1; i < max_changes; i++) {
				ctrlr->changed_ns_list.ns_list[i] = 0;
			}
		} else {
			ctrlr->changed_ns_list.ns_list[ctrlr->changed_ns_list_count++] = nsid;
		}
	}

	spdk_nvmf_ctrlr_async_event_ns_notice(ctrlr);
}

static void
spdk_nvmf_get_changed_ns_list_log_page(struct spdk_nvmf_ctrlr *ctrlr,
				       void *buffer, uint64_t offset, uint32_t length)
{
	size_t copy_length;

	if (offset < sizeof(ctrlr->changed_ns_list)) {
		copy_length = spdk_min(length, sizeof(ctrlr->changed_ns_list) - offset);
		if (copy_length) {
			memcpy(buffer, (char *)&ctrlr->changed_ns_list + offset, copy_length);
		}
	}

	/* Clear log page each time it is read */
	ctrlr->changed_ns_list_count = 0;
	memset(&ctrlr->changed_ns_list, 0, sizeof(ctrlr->changed_ns_list));
}

/* The structure can be modified if we provide support for other commands in the future */
static const struct spdk_nvme_cmds_and_effect_log_page g_cmds_and_effect_log_page = {
	.admin_cmds_supported = {
		/* CSUPP, LBCC, NCC, NIC, CCC, CSE */
		/* Get Log Page */
		[SPDK_NVME_OPC_GET_LOG_PAGE]		= {1, 0, 0, 0, 0, 0, 0, 0},
		/* Identify */
		[SPDK_NVME_OPC_IDENTIFY]		= {1, 0, 0, 0, 0, 0, 0, 0},
		/* Abort */
		[SPDK_NVME_OPC_ABORT]			= {1, 0, 0, 0, 0, 0, 0, 0},
		/* Set Features */
		[SPDK_NVME_OPC_SET_FEATURES]		= {1, 0, 0, 0, 0, 0, 0, 0},
		/* Get Features */
		[SPDK_NVME_OPC_GET_FEATURES]		= {1, 0, 0, 0, 0, 0, 0, 0},
		/* Async Event Request */
		[SPDK_NVME_OPC_ASYNC_EVENT_REQUEST]	= {1, 0, 0, 0, 0, 0, 0, 0},
		/* Keep Alive */
		[SPDK_NVME_OPC_KEEP_ALIVE]		= {1, 0, 0, 0, 0, 0, 0, 0},
	},
	.io_cmds_supported = {
		/* FLUSH */
		[SPDK_NVME_OPC_FLUSH]			= {1, 1, 0, 0, 0, 0, 0, 0},
		/* WRITE */
		[SPDK_NVME_OPC_WRITE]			= {1, 1, 0, 0, 0, 0, 0, 0},
		/* READ */
		[SPDK_NVME_OPC_READ]			= {1, 0, 0, 0, 0, 0, 0, 0},
		/* WRITE ZEROES */
		[SPDK_NVME_OPC_WRITE_ZEROES]		= {1, 1, 0, 0, 0, 0, 0, 0},
		/* DATASET MANAGEMENT */
		[SPDK_NVME_OPC_DATASET_MANAGEMENT]	= {1, 1, 0, 0, 0, 0, 0, 0},
	},
};

static void
spdk_nvmf_get_cmds_and_effects_log_page(void *buffer,
					uint64_t offset, uint32_t length)
{
	uint32_t page_size = sizeof(struct spdk_nvme_cmds_and_effect_log_page);
	size_t copy_len = 0;
	size_t zero_len = length;

	if (offset < page_size) {
		copy_len = spdk_min(page_size - offset, length);
		zero_len -= copy_len;
		memcpy(buffer, (char *)(&g_cmds_and_effect_log_page) + offset, copy_len);
	}

	if (zero_len) {
		memset((char *)buffer + copy_len, 0, zero_len);
	}
}

static void
spdk_nvmf_get_reservation_notification_log_page(struct spdk_nvmf_ctrlr *ctrlr,
		void *data, uint64_t offset, uint32_t length)
{
	uint32_t unit_log_len, avail_log_len, next_pos, copy_len;
	struct spdk_nvmf_reservation_log *log, *log_tmp;
	uint8_t *buf = data;

	unit_log_len = sizeof(struct spdk_nvme_reservation_notification_log);
	/* No available log, return 1 zeroed log page */
	if (!ctrlr->num_avail_log_pages) {
		memset(buf, 0, spdk_min(length, unit_log_len));
		return;
	}

	avail_log_len = ctrlr->num_avail_log_pages * unit_log_len;
	if (offset >= avail_log_len) {
		return;
	}

	next_pos = copy_len = 0;
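	/*
	 * Every log visited below is dequeued and freed, so the Reservation
	 * Notification log page is effectively cleared as the host reads it.
	 */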
	TAILQ_FOREACH_SAFE(log, &ctrlr->log_head, link, log_tmp) {
		TAILQ_REMOVE(&ctrlr->log_head, log, link);
		ctrlr->num_avail_log_pages--;

		next_pos += unit_log_len;
		if (next_pos > offset) {
			copy_len = spdk_min(next_pos - offset, length);
			memcpy(buf, &log->log, copy_len);
			length -= copy_len;
			offset += copy_len;
			buf += copy_len;
		}
		free(log);

		if (length == 0) {
			break;
		}
	}
	return;
}

static int
spdk_nvmf_ctrlr_get_log_page(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	uint64_t offset, len;
	uint32_t numdl, numdu;
	uint8_t lid;

	if (req->data == NULL) {
		SPDK_ERRLOG("get log command with no buffer\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	offset = (uint64_t)cmd->cdw12 | ((uint64_t)cmd->cdw13 << 32);
	if (offset & 3) {
		SPDK_ERRLOG("Invalid log page offset 0x%" PRIx64 "\n", offset);
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	numdl = (cmd->cdw10 >> 16) & 0xFFFFu;
	numdu = (cmd->cdw11) & 0xFFFFu;
	len = ((numdu << 16) + numdl + (uint64_t)1) * 4;
	if (len > req->length) {
		SPDK_ERRLOG("Get log page: len (%" PRIu64 ") > buf size (%u)\n",
			    len, req->length);
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	lid = cmd->cdw10 & 0xFF;
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Get log page: LID=0x%02X offset=0x%" PRIx64 " len=0x%" PRIx64 "\n",
		      lid, offset, len);

	if (subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
		switch (lid) {
		case SPDK_NVME_LOG_DISCOVERY:
			spdk_nvmf_get_discovery_log_page(subsystem->tgt, req->iov, req->iovcnt, offset, len);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		default:
			goto invalid_log_page;
		}
	} else {
		switch (lid) {
		case SPDK_NVME_LOG_ERROR:
		case SPDK_NVME_LOG_HEALTH_INFORMATION:
			/* TODO: actually fill out log page data */
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		case SPDK_NVME_LOG_FIRMWARE_SLOT:
			spdk_nvmf_get_firmware_slot_log_page(req->data, offset, len);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		case SPDK_NVME_LOG_COMMAND_EFFECTS_LOG:
			spdk_nvmf_get_cmds_and_effects_log_page(req->data, offset, len);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		case SPDK_NVME_LOG_CHANGED_NS_LIST:
			spdk_nvmf_get_changed_ns_list_log_page(ctrlr, req->data, offset, len);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		case SPDK_NVME_LOG_RESERVATION_NOTIFICATION:
			spdk_nvmf_get_reservation_notification_log_page(ctrlr, req->data, offset, len);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		default:
			goto invalid_log_page;
		}
	}

invalid_log_page:
	SPDK_ERRLOG("Unsupported Get Log Page 0x%02X\n", lid);
	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
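/*
 * Example of the NUMD encoding handled above in spdk_nvmf_ctrlr_get_log_page():
 * to read the first 4 KiB of a log page, a host sets NUMDL = 0x03FF and
 * NUMDU = 0 (NUMD is a 0-based dword count), so len = (0x3FF + 1) * 4 = 4096.
 */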
static int
spdk_nvmf_ctrlr_identify_ns(struct spdk_nvmf_ctrlr *ctrlr,
			    struct spdk_nvme_cmd *cmd,
			    struct spdk_nvme_cpl *rsp,
			    struct spdk_nvme_ns_data *nsdata)
{
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvmf_ns *ns;
	uint32_t max_num_blocks;

	if (cmd->nsid == 0 || cmd->nsid > subsystem->max_nsid) {
		SPDK_ERRLOG("Identify Namespace for invalid NSID %u\n", cmd->nsid);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ns = _spdk_nvmf_subsystem_get_ns(subsystem, cmd->nsid);
	if (ns == NULL || ns->bdev == NULL) {
		/*
		 * Inactive namespaces should return a zero filled data structure.
		 * The data buffer is already zeroed by spdk_nvmf_ctrlr_process_admin_cmd(),
		 * so we can just return early here.
		 */
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Identify Namespace for inactive NSID %u\n", cmd->nsid);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_SUCCESS;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	spdk_nvmf_bdev_ctrlr_identify_ns(ns, nsdata);

	/* Due to a bug in the Linux kernel NVMe driver, we have to set NOIOB no larger than MDTS */
	max_num_blocks = ctrlr->admin_qpair->transport->opts.max_io_size /
			 (1U << nsdata->lbaf[nsdata->flbas.format].lbads);
	if (nsdata->noiob > max_num_blocks) {
		nsdata->noiob = max_num_blocks;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_ctrlr_data *cdata)
{
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvmf_transport *transport = ctrlr->admin_qpair->transport;

	/*
	 * Common fields for discovery and NVM subsystems
	 */
	spdk_strcpy_pad(cdata->fr, FW_VERSION, sizeof(cdata->fr), ' ');
	assert((transport->opts.max_io_size % 4096) == 0);
	cdata->mdts = spdk_u32log2(transport->opts.max_io_size / 4096);
	cdata->cntlid = ctrlr->cntlid;
	cdata->ver = ctrlr->vcprop.vs;
	cdata->lpa.edlp = 1;
	cdata->elpe = 127;
	cdata->maxcmd = transport->opts.max_queue_depth;
	cdata->sgls.supported = 1;
	cdata->sgls.keyed_sgl = 1;
	cdata->sgls.sgl_offset = 1;
	spdk_strcpy_pad(cdata->subnqn, subsystem->subnqn, sizeof(cdata->subnqn), '\0');

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "ctrlr data: maxcmd 0x%x\n", cdata->maxcmd);
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "sgls data: 0x%x\n", from_le32(&cdata->sgls));

	/*
	 * NVM subsystem fields (reserved for discovery subsystems)
	 */
	if (subsystem->subtype == SPDK_NVMF_SUBTYPE_NVME) {
		spdk_strcpy_pad(cdata->mn, spdk_nvmf_subsystem_get_mn(subsystem), sizeof(cdata->mn), ' ');
		spdk_strcpy_pad(cdata->sn, spdk_nvmf_subsystem_get_sn(subsystem), sizeof(cdata->sn), ' ');
		cdata->kas = KAS_DEFAULT_VALUE;

		cdata->rab = 6;
		cdata->cmic.multi_port = 1;
		cdata->cmic.multi_host = 1;
		cdata->oaes.ns_attribute_notices = 1;
		cdata->ctratt.host_id_exhid_supported = 1;
		cdata->aerl = 0;
		cdata->frmw.slot1_ro = 1;
		cdata->frmw.num_slots = 1;

		cdata->lpa.celp = 1; /* Command Effects log page supported */
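		/*
		 * NVMe-oF fixes the queue entry sizes: 64-byte submission
		 * queue entries (2^6) and 16-byte completion queue entries
		 * (2^4), so SQES and CQES advertise no other sizes.
		 */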
		cdata->sqes.min = 6;
		cdata->sqes.max = 6;
		cdata->cqes.min = 4;
		cdata->cqes.max = 4;
		cdata->nn = subsystem->max_nsid;
		cdata->vwc.present = 1;
		cdata->vwc.flush_broadcast = SPDK_NVME_FLUSH_BROADCAST_NOT_SUPPORTED;

		cdata->nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
		cdata->nvmf_specific.iorcsz = sizeof(struct spdk_nvme_cpl) / 16;
		cdata->nvmf_specific.icdoff = 0; /* offset starts directly after SQE */
		cdata->nvmf_specific.ctrattr.ctrlr_model = SPDK_NVMF_CTRLR_MODEL_DYNAMIC;
		/* The RDMA transport supports up to SPDK_NVMF_MAX_SGL_ENTRIES descriptors. */
		if (transport->ops->type == SPDK_NVME_TRANSPORT_RDMA) {
			cdata->nvmf_specific.msdbd = SPDK_NVMF_MAX_SGL_ENTRIES;
		} else {
			cdata->nvmf_specific.msdbd = 1;
		}

		/* TODO: this should be set by the transport */
		cdata->nvmf_specific.ioccsz += transport->opts.in_capsule_data_size / 16;

		cdata->oncs.dsm = spdk_nvmf_ctrlr_dsm_supported(ctrlr);
		cdata->oncs.write_zeroes = spdk_nvmf_ctrlr_write_zeroes_supported(ctrlr);
		cdata->oncs.reservations = 1;

		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "ext ctrlr data: ioccsz 0x%x\n",
			      cdata->nvmf_specific.ioccsz);
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "ext ctrlr data: iorcsz 0x%x\n",
			      cdata->nvmf_specific.iorcsz);
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "ext ctrlr data: icdoff 0x%x\n",
			      cdata->nvmf_specific.icdoff);
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "ext ctrlr data: ctrattr 0x%x\n",
			      *(uint8_t *)&cdata->nvmf_specific.ctrattr);
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "ext ctrlr data: msdbd 0x%x\n",
			      cdata->nvmf_specific.msdbd);
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_identify_active_ns_list(struct spdk_nvmf_subsystem *subsystem,
					struct spdk_nvme_cmd *cmd,
					struct spdk_nvme_cpl *rsp,
					struct spdk_nvme_ns_list *ns_list)
{
	struct spdk_nvmf_ns *ns;
	uint32_t count = 0;

	if (cmd->nsid >= 0xfffffffeUL) {
		SPDK_ERRLOG("Identify Active Namespace List with invalid NSID %u\n", cmd->nsid);
		rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		if (ns->opts.nsid <= cmd->nsid) {
			continue;
		}

		ns_list->ns_list[count++] = ns->opts.nsid;
		if (count == SPDK_COUNTOF(ns_list->ns_list)) {
			break;
		}
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
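/*
 * Append one NS ID descriptor (a NIDT/NIDL header followed by the identifier
 * bytes) to the Identify NS ID Descriptor List buffer, advancing the buffer
 * cursor and remaining-space counter.
 */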
	 */
	assert(data_size > 0);
	assert(data_size <= UINT8_MAX);
	assert(desc_size < *buf_remain);
	if (data_size == 0 || data_size > UINT8_MAX || desc_size > *buf_remain) {
		return;
	}

	desc = *buf_ptr;
	desc->nidt = type;
	desc->nidl = data_size;
	memcpy(desc->nid, data, data_size);

	*buf_ptr += desc_size;
	*buf_remain -= desc_size;
}

static int
spdk_nvmf_ctrlr_identify_ns_id_descriptor_list(
	struct spdk_nvmf_subsystem *subsystem,
	struct spdk_nvme_cmd *cmd,
	struct spdk_nvme_cpl *rsp,
	void *id_desc_list, size_t id_desc_list_size)
{
	struct spdk_nvmf_ns *ns;
	size_t buf_remain = id_desc_list_size;
	void *buf_ptr = id_desc_list;

	ns = _spdk_nvmf_subsystem_get_ns(subsystem, cmd->nsid);
	if (ns == NULL || ns->bdev == NULL) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

#define ADD_ID_DESC(type, data, size) \
	do { \
		if (!spdk_mem_all_zero(data, size)) { \
			_add_ns_id_desc(&buf_ptr, &buf_remain, type, data, size); \
		} \
	} while (0)

	ADD_ID_DESC(SPDK_NVME_NIDT_EUI64, ns->opts.eui64, sizeof(ns->opts.eui64));
	ADD_ID_DESC(SPDK_NVME_NIDT_NGUID, ns->opts.nguid, sizeof(ns->opts.nguid));
	ADD_ID_DESC(SPDK_NVME_NIDT_UUID, &ns->opts.uuid, sizeof(ns->opts.uuid));

	/*
	 * The list is automatically 0-terminated because controller to host buffers in
	 * admin commands always get zeroed in spdk_nvmf_ctrlr_process_admin_cmd().
	 */

#undef ADD_ID_DESC

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_identify(struct spdk_nvmf_request *req)
{
	uint8_t cns;
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;

	if (req->data == NULL || req->length < 4096) {
		SPDK_ERRLOG("identify command with invalid buffer\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	cns = cmd->cdw10 & 0xFF;

	if (subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY &&
	    cns != SPDK_NVME_IDENTIFY_CTRLR) {
		/* Discovery controllers only support Identify Controller */
		goto invalid_cns;
	}

	switch (cns) {
	case SPDK_NVME_IDENTIFY_NS:
		return spdk_nvmf_ctrlr_identify_ns(ctrlr, cmd, rsp, req->data);
	case SPDK_NVME_IDENTIFY_CTRLR:
		return spdk_nvmf_ctrlr_identify_ctrlr(ctrlr, req->data);
	case SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST:
		return spdk_nvmf_ctrlr_identify_active_ns_list(subsystem, cmd, rsp, req->data);
	case SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST:
		return spdk_nvmf_ctrlr_identify_ns_id_descriptor_list(subsystem, cmd, rsp, req->data, req->length);
	default:
		goto invalid_cns;
	}

invalid_cns:
	SPDK_ERRLOG("Identify command with unsupported CNS 0x%02x\n", cns);
	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
	rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static struct spdk_nvmf_request *
spdk_nvmf_qpair_abort(struct spdk_nvmf_qpair *qpair, uint16_t cid)
{
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
	struct spdk_nvmf_request *req;

	if (spdk_nvmf_qpair_is_admin_queue(qpair)) {
		if (ctrlr->aer_req && ctrlr->aer_req->cmd->nvme_cmd.cid == cid) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Aborting AER request\n");
			req = ctrlr->aer_req;
			ctrlr->aer_req = NULL;
			return req;
		}
	}

	/* TODO: track list of outstanding requests in qpair? */
	return NULL;
}

static void
spdk_nvmf_ctrlr_abort_done(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_nvmf_request *req = spdk_io_channel_iter_get_ctx(i);

	spdk_nvmf_request_complete(req);
}

static void
spdk_nvmf_ctrlr_abort_on_pg(struct spdk_io_channel_iter *i)
{
	struct spdk_nvmf_request *req = spdk_io_channel_iter_get_ctx(i);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	uint16_t sqid = cmd->cdw10 & 0xFFFFu; /* Abort CDW10: SQID in bits 15:0, CID in bits 31:16 */
	struct spdk_nvmf_qpair *qpair;

	TAILQ_FOREACH(qpair, &group->qpairs, link) {
		if (qpair->ctrlr == req->qpair->ctrlr && qpair->qid == sqid) {
			struct spdk_nvmf_request *req_to_abort;
			uint16_t cid = cmd->cdw10 >> 16;

			/* Found the qpair */

			req_to_abort = spdk_nvmf_qpair_abort(qpair, cid);
			if (req_to_abort == NULL) {
				SPDK_DEBUGLOG(SPDK_LOG_NVMF, "cid %u not found\n", cid);
				rsp->status.sct = SPDK_NVME_SCT_GENERIC;
				rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
				spdk_for_each_channel_continue(i, -EINVAL);
				return;
			}

			/* Complete the request with aborted status */
			req_to_abort->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			req_to_abort->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
			spdk_nvmf_request_complete(req_to_abort);

			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "abort ctrlr=%p req=%p sqid=%u cid=%u successful\n",
				      qpair->ctrlr, req_to_abort, sqid, cid);
			rsp->cdw0 = 0; /* Command successfully aborted */
			rsp->status.sct = SPDK_NVME_SCT_GENERIC;
			rsp->status.sc = SPDK_NVME_SC_SUCCESS;
			/* Return -1 for the status so the iteration across threads stops. */
			spdk_for_each_channel_continue(i, -1);
			return;
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static int
spdk_nvmf_ctrlr_abort(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	rsp->cdw0 = 1; /* Command not aborted */
	rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	rsp->status.sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER;

	/* Send a message to each poll group, searching for this ctrlr, sqid, and command. */
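	/* If no poll group finds a matching command, the "not aborted" status pre-set
	 * above is returned to the host unchanged. */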
	spdk_for_each_channel(req->qpair->ctrlr->subsys->tgt,
			      spdk_nvmf_ctrlr_abort_on_pg,
			      req,
			      spdk_nvmf_ctrlr_abort_done
			     );

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

static int
get_features_generic(struct spdk_nvmf_request *req, uint32_t cdw0)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	rsp->cdw0 = cdw0;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
spdk_nvmf_ctrlr_get_features(struct spdk_nvmf_request *req)
{
	uint8_t feature;
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	feature = cmd->cdw10 & 0xff; /* extract the Feature Identifier (FID) */
	switch (feature) {
	case SPDK_NVME_FEAT_ARBITRATION:
		return get_features_generic(req, ctrlr->feat.arbitration.raw);
	case SPDK_NVME_FEAT_POWER_MANAGEMENT:
		return get_features_generic(req, ctrlr->feat.power_management.raw);
	case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD:
		return spdk_nvmf_ctrlr_get_features_temperature_threshold(req);
	case SPDK_NVME_FEAT_ERROR_RECOVERY:
		return get_features_generic(req, ctrlr->feat.error_recovery.raw);
	case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE:
		return get_features_generic(req, ctrlr->feat.volatile_write_cache.raw);
	case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
		return get_features_generic(req, ctrlr->feat.number_of_queues.raw);
	case SPDK_NVME_FEAT_WRITE_ATOMICITY:
		return get_features_generic(req, ctrlr->feat.write_atomicity.raw);
	case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
		return get_features_generic(req, ctrlr->feat.async_event_configuration.raw);
	case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
		return get_features_generic(req, ctrlr->feat.keep_alive_timer.raw);
	case SPDK_NVME_FEAT_HOST_IDENTIFIER:
		return spdk_nvmf_ctrlr_get_features_host_identifier(req);
	case SPDK_NVME_FEAT_HOST_RESERVE_MASK:
		return spdk_nvmf_ctrlr_get_features_reservation_notification_mask(req);
	case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST:
		return spdk_nvmf_ctrlr_get_features_reservation_persistence(req);
	default:
		SPDK_ERRLOG("Get Features command with unsupported feature ID 0x%02x\n", feature);
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}

static int
spdk_nvmf_ctrlr_set_features(struct spdk_nvmf_request *req)
{
	uint8_t feature;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	feature = cmd->cdw10 & 0xff; /* extract the Feature Identifier (FID) */
	switch (feature) {
	case SPDK_NVME_FEAT_ARBITRATION:
		return spdk_nvmf_ctrlr_set_features_arbitration(req);
	case SPDK_NVME_FEAT_POWER_MANAGEMENT:
		return spdk_nvmf_ctrlr_set_features_power_management(req);
	case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD:
		return spdk_nvmf_ctrlr_set_features_temperature_threshold(req);
	case SPDK_NVME_FEAT_ERROR_RECOVERY:
		return spdk_nvmf_ctrlr_set_features_error_recovery(req);
	case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE:
		return spdk_nvmf_ctrlr_set_features_volatile_write_cache(req);
	case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
		return spdk_nvmf_ctrlr_set_features_number_of_queues(req);
	case SPDK_NVME_FEAT_WRITE_ATOMICITY:
		return spdk_nvmf_ctrlr_set_features_write_atomicity(req);
	case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
		return spdk_nvmf_ctrlr_set_features_async_event_configuration(req);
	case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
		return spdk_nvmf_ctrlr_set_features_keep_alive_timer(req);
	case SPDK_NVME_FEAT_HOST_IDENTIFIER:
		return spdk_nvmf_ctrlr_set_features_host_identifier(req);
	case SPDK_NVME_FEAT_HOST_RESERVE_MASK:
		return spdk_nvmf_ctrlr_set_features_reservation_notification_mask(req);
	case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST:
		return spdk_nvmf_ctrlr_set_features_reservation_persistence(req);
	default:
		SPDK_ERRLOG("Set Features command with unsupported feature ID 0x%02x\n", feature);
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}

static int
spdk_nvmf_ctrlr_keep_alive(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Keep Alive\n");
	/*
	 * To handle keep alive just clear or reset the
	 * ctrlr based keep alive duration counter.
	 * When added, a separate timer based process
	 * will monitor if the time since last recorded
	 * keep alive has exceeded the max duration and
	 * take appropriate action.
	 */
	ctrlr->last_keep_alive_tick = spdk_get_ticks();

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

int
spdk_nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	if (ctrlr == NULL) {
		SPDK_ERRLOG("Admin command sent before CONNECT\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (ctrlr->vcprop.cc.bits.en != 1) {
		SPDK_ERRLOG("Admin command sent to disabled controller\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* Controller-to-host buffers are zeroed up front, so handlers that fill less
	 * than the full buffer leave the remainder zero-filled. */
	if (req->data && spdk_nvme_opc_get_data_transfer(cmd->opc) == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
		memset(req->data, 0, req->length);
	}

	if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
		/* Discovery controllers only support Get Log Page and Identify */
		switch (cmd->opc) {
		case SPDK_NVME_OPC_IDENTIFY:
		case SPDK_NVME_OPC_GET_LOG_PAGE:
			break;
		default:
			goto invalid_opcode;
		}
	}

	switch (cmd->opc) {
	case SPDK_NVME_OPC_GET_LOG_PAGE:
		return spdk_nvmf_ctrlr_get_log_page(req);
	case SPDK_NVME_OPC_IDENTIFY:
		return spdk_nvmf_ctrlr_identify(req);
	case SPDK_NVME_OPC_ABORT:
		return spdk_nvmf_ctrlr_abort(req);
	case SPDK_NVME_OPC_GET_FEATURES:
		return spdk_nvmf_ctrlr_get_features(req);
	case SPDK_NVME_OPC_SET_FEATURES:
		return spdk_nvmf_ctrlr_set_features(req);
	case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST:
		return spdk_nvmf_ctrlr_async_event_request(req);
	case SPDK_NVME_OPC_KEEP_ALIVE:
		return spdk_nvmf_ctrlr_keep_alive(req);

	case SPDK_NVME_OPC_CREATE_IO_SQ:
	case SPDK_NVME_OPC_CREATE_IO_CQ:
	case SPDK_NVME_OPC_DELETE_IO_SQ:
	case SPDK_NVME_OPC_DELETE_IO_CQ:
		/* Create and Delete I/O CQ/SQ are not allowed in NVMe-oF */
		goto invalid_opcode;

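	/* Any other admin opcode, including vendor specific commands, is rejected. */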
	default:
		goto invalid_opcode;
	}

invalid_opcode:
	SPDK_ERRLOG("Unsupported admin opcode 0x%x\n", cmd->opc);
	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

int
spdk_nvmf_ctrlr_process_fabrics_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_capsule_cmd *cap_hdr;

	cap_hdr = &req->cmd->nvmf_cmd;

	if (qpair->ctrlr == NULL) {
		/* No ctrlr established yet; the only valid command is Connect */
		if (cap_hdr->fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT) {
			return spdk_nvmf_ctrlr_connect(req);
		} else {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Got fctype 0x%x, expected Connect\n",
				      cap_hdr->fctype);
			req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
	} else if (spdk_nvmf_qpair_is_admin_queue(qpair)) {
		/*
		 * Controller session is established, and this is an admin queue.
		 * Disallow Connect and allow other fabrics commands.
		 */
		switch (cap_hdr->fctype) {
		case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET:
			return spdk_nvmf_property_set(req);
		case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET:
			return spdk_nvmf_property_get(req);
		default:
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "unknown fctype 0x%02x\n",
				      cap_hdr->fctype);
			req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
	} else {
		/* Controller session is established, and this is an I/O queue */
		/* For now, no I/O-specific Fabrics commands are implemented (other than Connect) */
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Unexpected I/O fctype 0x%x\n", cap_hdr->fctype);
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}

int
spdk_nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_request *req;
	struct spdk_nvme_cpl *rsp;
	union spdk_nvme_async_event_completion event = {0};

	/* Users may disable the event notification */
	if (!ctrlr->feat.async_event_configuration.bits.ns_attr_notice) {
		return 0;
	}

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
	event.bits.log_page_identifier = SPDK_NVME_LOG_CHANGED_NS_LIST;

	/* If there is no outstanding AER request, queue the event. Then
	 * if an AER is later submitted, this event can be sent as a
	 * response.
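	 * Only one notice event is kept at a time; if one is already pending,
	 * this event is dropped.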
	 */
	if (!ctrlr->aer_req) {
		if (ctrlr->notice_event.bits.async_event_type ==
		    SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) {
			return 0;
		}

		ctrlr->notice_event.raw = event.raw;
		return 0;
	}

	req = ctrlr->aer_req;
	rsp = &req->rsp->nvme_cpl;

	rsp->cdw0 = event.raw;

	spdk_nvmf_request_complete(req);
	ctrlr->aer_req = NULL;

	return 0;
}

void
spdk_nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_request *req;
	struct spdk_nvme_cpl *rsp;
	union spdk_nvme_async_event_completion event = {0};

	if (!ctrlr->num_avail_log_pages) {
		return;
	}
	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_IO;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL;
	event.bits.log_page_identifier = SPDK_NVME_LOG_RESERVATION_NOTIFICATION;

	/* If there is no outstanding AER request, queue the event. Then
	 * if an AER is later submitted, this event can be sent as a
	 * response.
	 */
	if (!ctrlr->aer_req) {
		if (ctrlr->reservation_event.bits.async_event_type ==
		    SPDK_NVME_ASYNC_EVENT_TYPE_IO) {
			return;
		}

		ctrlr->reservation_event.raw = event.raw;
		return;
	}

	req = ctrlr->aer_req;
	rsp = &req->rsp->nvme_cpl;

	rsp->cdw0 = event.raw;

	spdk_nvmf_request_complete(req);
	ctrlr->aer_req = NULL;
}

void
spdk_nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	if (!spdk_nvmf_qpair_is_admin_queue(qpair)) {
		return;
	}

	if (ctrlr->aer_req != NULL) {
		spdk_nvmf_request_free(ctrlr->aer_req);
		ctrlr->aer_req = NULL;
	}
}

void
spdk_nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr)
{
	if (!ctrlr->aer_req) {
		return;
	}

	spdk_nvmf_request_complete(ctrlr->aer_req);
	ctrlr->aer_req = NULL;
}

static void
_nvmf_ctrlr_add_reservation_log(void *ctx)
{
	struct spdk_nvmf_reservation_log *log = (struct spdk_nvmf_reservation_log *)ctx;
	struct spdk_nvmf_ctrlr *ctrlr = log->ctrlr;

	ctrlr->log_page_count++;

	/* Maximum number of queued log pages is 255 */
	if (ctrlr->num_avail_log_pages == 0xff) {
		struct spdk_nvmf_reservation_log *entry;
		entry = TAILQ_LAST(&ctrlr->log_head, log_page_head);
		entry->log.log_page_count = ctrlr->log_page_count;
		free(log);
		return;
	}

	log->log.log_page_count = ctrlr->log_page_count;
	log->log.num_avail_log_pages = ctrlr->num_avail_log_pages++;
	TAILQ_INSERT_TAIL(&ctrlr->log_head, log, link);

	spdk_nvmf_ctrlr_async_event_reservation_notification(ctrlr);
}

void
spdk_nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
				       struct spdk_nvmf_ns *ns,
				       enum spdk_nvme_reservation_notification_log_page_type type)
{
	struct spdk_nvmf_reservation_log *log;

	switch (type) {
	case SPDK_NVME_RESERVATION_LOG_PAGE_EMPTY:
		return;
	case SPDK_NVME_REGISTRATION_PREEMPTED:
		if (ns->mask & SPDK_NVME_REGISTRATION_PREEMPTED_MASK) {
			return;
		}
		break;
	case SPDK_NVME_RESERVATION_RELEASED:
		if (ns->mask & SPDK_NVME_RESERVATION_RELEASED_MASK) {
			return;
		}
		break;
	case SPDK_NVME_RESERVATION_PREEMPTED:
		if (ns->mask & SPDK_NVME_RESERVATION_PREEMPTED_MASK) {
			return;
		}
		break;
	default:
		return;
	}

	log = calloc(1, sizeof(*log));
	if (!log) {
		SPDK_ERRLOG("Alloc log page failed, ignore the log\n");
		return;
	}
	log->ctrlr = ctrlr;
	log->log.type = type;
	log->log.nsid = ns->nsid;

	spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_add_reservation_log, log);
}

/* Check from subsystem poll group's namespace information data structure */
static bool
nvmf_ns_info_ctrlr_is_registrant(struct spdk_nvmf_subsystem_pg_ns_info *ns_info,
				 struct spdk_nvmf_ctrlr *ctrlr)
{
	uint32_t i;

	for (i = 0; i < SPDK_NVMF_MAX_NUM_REGISTRANTS; i++) {
		if (!spdk_uuid_compare(&ns_info->reg_hostid[i], &ctrlr->hostid)) {
			return true;
		}
	}

	return false;
}

/*
 * Check whether the NVMe command is permitted for the current controller (host).
 */
static int
nvmf_ns_reservation_request_check(struct spdk_nvmf_subsystem_pg_ns_info *ns_info,
				  struct spdk_nvmf_ctrlr *ctrlr,
				  struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	enum spdk_nvme_reservation_type rtype = ns_info->rtype;
	uint8_t status = SPDK_NVME_SC_SUCCESS;
	uint8_t racqa;
	bool is_registrant;

	/* No valid reservation */
	if (!rtype) {
		return 0;
	}

	is_registrant = nvmf_ns_info_ctrlr_is_registrant(ns_info, ctrlr);
	/* All-registrants type and the current ctrlr is a valid registrant */
	if ((rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS ||
	     rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) && is_registrant) {
		return 0;
	} else if (!spdk_uuid_compare(&ns_info->holder_id, &ctrlr->hostid)) {
		/* Current controller is the reservation holder */
		return 0;
	}

	/* Non-holder for current controller */
	switch (cmd->opc) {
	case SPDK_NVME_OPC_READ:
	case SPDK_NVME_OPC_COMPARE:
		if (rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
			goto exit;
		}
		if ((rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY ||
		     rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) && !is_registrant) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
			goto exit;
		}
		break;
	case SPDK_NVME_OPC_FLUSH:
	case SPDK_NVME_OPC_WRITE:
	case SPDK_NVME_OPC_WRITE_UNCORRECTABLE:
	case SPDK_NVME_OPC_WRITE_ZEROES:
	case SPDK_NVME_OPC_DATASET_MANAGEMENT:
		if (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE ||
		    rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
			goto exit;
		}
		if (!is_registrant) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
			goto exit;
		}
		break;
	case SPDK_NVME_OPC_RESERVATION_ACQUIRE:
	case SPDK_NVME_OPC_RESERVATION_RELEASE:
		racqa = cmd->cdw10 & 0x7u;
		if (cmd->opc == SPDK_NVME_OPC_RESERVATION_ACQUIRE &&
		    racqa == SPDK_NVME_RESERVE_ACQUIRE) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
			goto exit;
		}
		if (!is_registrant) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
			goto exit;
		}
		break;
	default:
		break;
	}

exit:
	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->rsp->nvme_cpl.status.sc = status;
	if (status == SPDK_NVME_SC_RESERVATION_CONFLICT) {
		return -EPERM;
	}

	return 0;
}

int
spdk_nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
{
	uint32_t nsid;
	struct spdk_nvmf_ns *ns;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_poll_group *group = req->qpair->group;
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;

	/* pre-set response details for this command */
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	nsid = cmd->nsid;

	if (spdk_unlikely(ctrlr == NULL)) {
		SPDK_ERRLOG("I/O command sent before CONNECT\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(ctrlr->vcprop.cc.bits.en != 1)) {
		SPDK_ERRLOG("I/O command sent to disabled controller\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ns = _spdk_nvmf_subsystem_get_ns(ctrlr->subsys, nsid);
	if (ns == NULL || ns->bdev == NULL) {
		SPDK_ERRLOG("Unsuccessful query for nsid %u\n", cmd->nsid);
		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		response->status.dnr = 1;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* scan-build falsely reporting dereference of null pointer */
	assert(group != NULL && group->sgroups != NULL);
	ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[nsid - 1];
	if (nvmf_ns_reservation_request_check(ns_info, ctrlr, req)) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Reservation Conflict for nsid %u, opcode %u\n",
			      cmd->nsid, cmd->opc);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	bdev = ns->bdev;
	desc = ns->desc;
	ch = ns_info->channel;
	switch (cmd->opc) {
	case SPDK_NVME_OPC_READ:
		return spdk_nvmf_bdev_ctrlr_read_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_WRITE:
		return spdk_nvmf_bdev_ctrlr_write_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_WRITE_ZEROES:
		return spdk_nvmf_bdev_ctrlr_write_zeroes_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_FLUSH:
		return spdk_nvmf_bdev_ctrlr_flush_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_DATASET_MANAGEMENT:
		return spdk_nvmf_bdev_ctrlr_dsm_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_RESERVATION_REGISTER:
	case SPDK_NVME_OPC_RESERVATION_ACQUIRE:
	case SPDK_NVME_OPC_RESERVATION_RELEASE:
	case SPDK_NVME_OPC_RESERVATION_REPORT:
		/* Reservation commands are handled asynchronously on the subsystem's thread. */
		spdk_thread_send_msg(ctrlr->subsys->thread, spdk_nvmf_ns_reservation_request, req);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	default:
		return spdk_nvmf_bdev_ctrlr_nvme_passthru_io(bdev, desc, ch, req);
	}
}

static void
spdk_nvmf_qpair_request_cleanup(struct spdk_nvmf_qpair *qpair)
{
	if (qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING) {
		assert(qpair->state_cb != NULL);

		if (TAILQ_EMPTY(&qpair->outstanding)) {
			qpair->state_cb(qpair->state_cb_arg, 0);
		}
	} else {
		assert(qpair->state == SPDK_NVMF_QPAIR_ACTIVE);
	}
}

int
spdk_nvmf_request_free(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;

	TAILQ_REMOVE(&qpair->outstanding, req, link);
	if (spdk_nvmf_transport_req_free(req)) {
		SPDK_ERRLOG("Unable to free transport level request resources.\n");
	}

	spdk_nvmf_qpair_request_cleanup(qpair);

	return 0;
}

int
spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_qpair *qpair;
	struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;

	rsp->sqid = 0;
	rsp->status.p = 0;
	rsp->cid = req->cmd->nvme_cmd.cid;

	qpair = req->qpair;
	if (qpair->ctrlr) {
		sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF,
		      "cpl: cid=%u cdw0=0x%08x rsvd1=%u status=0x%04x\n",
		      rsp->cid, rsp->cdw0, rsp->rsvd1,
		      *(uint16_t *)&rsp->status);

	TAILQ_REMOVE(&qpair->outstanding, req, link);
	if (spdk_nvmf_transport_req_complete(req)) {
		SPDK_ERRLOG("Transport request completion error!\n");
	}

	/* AER cmd and fabric connect are exceptions */
	if (sgroup != NULL && qpair->ctrlr->aer_req != req &&
	    !(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC &&
	      req->cmd->nvmf_cmd.fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT)) {
		assert(sgroup->io_outstanding > 0);
		sgroup->io_outstanding--;
		if (sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSING &&
		    sgroup->io_outstanding == 0) {
			sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
			sgroup->cb_fn(sgroup->cb_arg, 0);
		}
	}

	spdk_nvmf_qpair_request_cleanup(qpair);

	return 0;
}

static void
nvmf_trace_command(union nvmf_h2c_msg *h2c_msg, bool is_admin_queue)
{
	struct spdk_nvmf_capsule_cmd *cap_hdr = &h2c_msg->nvmf_cmd;
	struct spdk_nvme_cmd *cmd = &h2c_msg->nvme_cmd;
	struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1;
	uint8_t opc;

	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
		opc = cap_hdr->fctype;
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "%s Fabrics cmd: fctype 0x%02x cid %u\n",
			      is_admin_queue ? "Admin" : "I/O",
			      cap_hdr->fctype, cap_hdr->cid);
	} else {
		opc = cmd->opc;
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "%s cmd: opc 0x%02x fuse %u cid %u nsid %u cdw10 0x%08x\n",
			      is_admin_queue ? "Admin" : "I/O",
			      cmd->opc, cmd->fuse, cmd->cid, cmd->nsid, cmd->cdw10);
		if (cmd->mptr) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "mptr 0x%" PRIx64 "\n", cmd->mptr);
		}
		if (cmd->psdt != SPDK_NVME_PSDT_SGL_MPTR_CONTIG &&
		    cmd->psdt != SPDK_NVME_PSDT_SGL_MPTR_SGL) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "psdt %u\n", cmd->psdt);
		}
	}

	if (spdk_nvme_opc_get_data_transfer(opc) != SPDK_NVME_DATA_NONE) {
		if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF,
				      "SGL: Keyed%s: addr 0x%" PRIx64 " key 0x%x len 0x%x\n",
				      sgl->generic.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY ? " (Inv)" : "",
				      sgl->address, sgl->keyed.key, sgl->keyed.length);
		} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "SGL: Data block: %s 0x%" PRIx64 " len 0x%x\n",
"offs" : "addr", 2549 sgl->address, sgl->unkeyed.length); 2550 } else { 2551 SPDK_DEBUGLOG(SPDK_LOG_NVMF, "SGL type 0x%x subtype 0x%x\n", 2552 sgl->generic.type, sgl->generic.subtype); 2553 } 2554 } 2555 } 2556 2557 void 2558 spdk_nvmf_request_exec(struct spdk_nvmf_request *req) 2559 { 2560 struct spdk_nvmf_qpair *qpair = req->qpair; 2561 spdk_nvmf_request_exec_status status; 2562 struct spdk_nvmf_subsystem_poll_group *sgroup = NULL; 2563 2564 nvmf_trace_command(req->cmd, spdk_nvmf_qpair_is_admin_queue(qpair)); 2565 2566 if (qpair->ctrlr) { 2567 sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id]; 2568 } 2569 2570 if (qpair->state != SPDK_NVMF_QPAIR_ACTIVE) { 2571 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2572 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR; 2573 /* Place the request on the outstanding list so we can keep track of it */ 2574 TAILQ_INSERT_TAIL(&qpair->outstanding, req, link); 2575 /* Still increment io_outstanding because request_complete decrements it */ 2576 if (sgroup != NULL) { 2577 sgroup->io_outstanding++; 2578 } 2579 spdk_nvmf_request_complete(req); 2580 return; 2581 } 2582 2583 /* Check if the subsystem is paused (if there is a subsystem) */ 2584 if (sgroup != NULL) { 2585 if (sgroup->state != SPDK_NVMF_SUBSYSTEM_ACTIVE) { 2586 /* The subsystem is not currently active. Queue this request. */ 2587 TAILQ_INSERT_TAIL(&sgroup->queued, req, link); 2588 return; 2589 } 2590 2591 sgroup->io_outstanding++; 2592 } 2593 2594 /* Place the request on the outstanding list so we can keep track of it */ 2595 TAILQ_INSERT_TAIL(&qpair->outstanding, req, link); 2596 2597 if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC)) { 2598 status = spdk_nvmf_ctrlr_process_fabrics_cmd(req); 2599 } else if (spdk_unlikely(spdk_nvmf_qpair_is_admin_queue(qpair))) { 2600 status = spdk_nvmf_ctrlr_process_admin_cmd(req); 2601 } else { 2602 status = spdk_nvmf_ctrlr_process_io_cmd(req); 2603 } 2604 2605 if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) { 2606 spdk_nvmf_request_complete(req); 2607 } 2608 } 2609