/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"
#include "transport.h"

#include "spdk/bit_array.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/trace.h"
#include "spdk/nvme_spec.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/version.h"

#include "spdk/log.h"

#define MIN_KEEP_ALIVE_TIMEOUT_IN_MS 10000
#define NVMF_DISC_KATO_IN_MS 120000
#define KAS_TIME_UNIT_IN_MS 100
#define KAS_DEFAULT_VALUE (MIN_KEEP_ALIVE_TIMEOUT_IN_MS / KAS_TIME_UNIT_IN_MS)

/*
 * Report the SPDK version as the firmware revision.
 * SPDK_VERSION_STRING won't fit into FR (only 8 bytes), so try to fit the most important parts.
 */
#define FW_VERSION SPDK_VERSION_MAJOR_STRING SPDK_VERSION_MINOR_STRING SPDK_VERSION_PATCH_STRING

#define ANA_TRANSITION_TIME_IN_SEC 10

/*
 * Support for custom admin command handlers
 */
struct spdk_nvmf_custom_admin_cmd {
	spdk_nvmf_custom_cmd_hdlr hdlr;
	uint32_t nsid; /* nsid to forward */
};

static struct spdk_nvmf_custom_admin_cmd g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_MAX_OPC + 1];

static void _nvmf_request_complete(void *ctx);

static inline void
nvmf_invalid_connect_response(struct spdk_nvmf_fabric_connect_rsp *rsp,
			      uint8_t iattr, uint16_t ipo)
{
	rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
	rsp->status_code_specific.invalid.iattr = iattr;
	rsp->status_code_specific.invalid.ipo = ipo;
}

#define SPDK_NVMF_INVALID_CONNECT_CMD(rsp, field)	\
	nvmf_invalid_connect_response(rsp, 0, offsetof(struct spdk_nvmf_fabric_connect_cmd, field))
#define SPDK_NVMF_INVALID_CONNECT_DATA(rsp, field)	\
	nvmf_invalid_connect_response(rsp, 1, offsetof(struct spdk_nvmf_fabric_connect_data, field))

static void
nvmf_ctrlr_stop_keep_alive_timer(struct spdk_nvmf_ctrlr *ctrlr)
{
	if (!ctrlr) {
		SPDK_ERRLOG("Controller is NULL\n");
		return;
	}

	if (ctrlr->keep_alive_poller == NULL) {
		return;
	}

	SPDK_DEBUGLOG(nvmf, "Stop keep alive poller\n");
	spdk_poller_unregister(&ctrlr->keep_alive_poller);
}

static void
nvmf_ctrlr_stop_association_timer(struct spdk_nvmf_ctrlr *ctrlr)
{
	if (!ctrlr) {
		SPDK_ERRLOG("Controller is NULL\n");
		assert(false);
		return;
	}

	if (ctrlr->association_timer == NULL) {
		return;
	}

	SPDK_DEBUGLOG(nvmf, "Stop association timer\n");
	spdk_poller_unregister(&ctrlr->association_timer);
}

static void
nvmf_ctrlr_disconnect_qpairs_done(struct spdk_io_channel_iter *i, int status)
{
	if (status == 0) {
		SPDK_DEBUGLOG(nvmf, "ctrlr disconnect qpairs complete successfully\n");
	} else {
		SPDK_ERRLOG("Fail to disconnect ctrlr qpairs\n");
	}
}

static int
_nvmf_ctrlr_disconnect_qpairs_on_pg(struct spdk_io_channel_iter *i, bool include_admin)
{
	int rc = 0;
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_qpair *qpair, *temp_qpair;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_poll_group *group;

	ctrlr = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, temp_qpair) {
		if (qpair->ctrlr == ctrlr && (include_admin || !nvmf_qpair_is_admin_queue(qpair))) {
			rc = spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
			if (rc) {
				SPDK_ERRLOG("Qpair disconnect failed\n");
				return rc;
			}
		}
	}

	return rc;
}

static void
nvmf_ctrlr_disconnect_qpairs_on_pg(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, _nvmf_ctrlr_disconnect_qpairs_on_pg(i, true));
}

static void
nvmf_ctrlr_disconnect_io_qpairs_on_pg(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, _nvmf_ctrlr_disconnect_qpairs_on_pg(i, false));
}

static int
nvmf_ctrlr_keep_alive_poll(void *ctx)
{
	uint64_t keep_alive_timeout_tick;
	uint64_t now = spdk_get_ticks();
	struct spdk_nvmf_ctrlr *ctrlr = ctx;

	SPDK_DEBUGLOG(nvmf, "Polling ctrlr keep alive timeout\n");

	/* If the Keep alive feature is in use and the timer expires */
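	/*
	 * KATO is stored in milliseconds, so the expiration point in ticks is
	 * last_keep_alive_tick + kato * (ticks per second) / 1000.
	 */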
	keep_alive_timeout_tick = ctrlr->last_keep_alive_tick +
				  ctrlr->feat.keep_alive_timer.bits.kato * spdk_get_ticks_hz() / UINT64_C(1000);
	if (now > keep_alive_timeout_tick) {
		SPDK_NOTICELOG("Disconnecting host from subsystem %s due to keep alive timeout.\n",
			       ctrlr->subsys->subnqn);
		/* set the Controller Fatal Status bit to '1' */
		if (ctrlr->vcprop.csts.bits.cfs == 0) {
			ctrlr->vcprop.csts.bits.cfs = 1;

			/*
			 * disconnect qpairs, terminate Transport connection
			 * destroy ctrlr, break the host to controller association
			 * disconnect qpairs with qpair->ctrlr == ctrlr
			 */
			spdk_for_each_channel(ctrlr->subsys->tgt,
					      nvmf_ctrlr_disconnect_qpairs_on_pg,
					      ctrlr,
					      nvmf_ctrlr_disconnect_qpairs_done);
		}
	}

	return SPDK_POLLER_BUSY;
}

static void
nvmf_ctrlr_start_keep_alive_timer(struct spdk_nvmf_ctrlr *ctrlr)
{
	if (!ctrlr) {
		SPDK_ERRLOG("Controller is NULL\n");
		return;
	}

	/* if cleared to 0 then the Keep Alive Timer is disabled */
	if (ctrlr->feat.keep_alive_timer.bits.kato != 0) {

		ctrlr->last_keep_alive_tick = spdk_get_ticks();

		SPDK_DEBUGLOG(nvmf, "Ctrlr add keep alive poller\n");
		ctrlr->keep_alive_poller = SPDK_POLLER_REGISTER(nvmf_ctrlr_keep_alive_poll, ctrlr,
					   ctrlr->feat.keep_alive_timer.bits.kato * 1000);
	}
}

static void
ctrlr_add_qpair_and_update_rsp(struct spdk_nvmf_qpair *qpair,
			       struct spdk_nvmf_ctrlr *ctrlr,
			       struct spdk_nvmf_fabric_connect_rsp *rsp)
{
	assert(ctrlr->admin_qpair->group->thread == spdk_get_thread());

	/* check if we would exceed ctrlr connection limit */
	if (qpair->qid >= spdk_bit_array_capacity(ctrlr->qpair_mask)) {
		SPDK_ERRLOG("Requested QID %u but Max QID is %u\n",
			    qpair->qid, spdk_bit_array_capacity(ctrlr->qpair_mask) - 1);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER;
		return;
	}

	if (spdk_bit_array_get(ctrlr->qpair_mask, qpair->qid)) {
		SPDK_ERRLOG("Got I/O connect with duplicate QID %u\n", qpair->qid);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER;
		return;
	}

	qpair->ctrlr = ctrlr;
	spdk_bit_array_set(ctrlr->qpair_mask, qpair->qid);

	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
	rsp->status_code_specific.success.cntlid = ctrlr->cntlid;
	SPDK_DEBUGLOG(nvmf, "connect capsule response: cntlid = 0x%04x\n",
		      rsp->status_code_specific.success.cntlid);
}

static void
_nvmf_ctrlr_add_admin_qpair(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	ctrlr->admin_qpair = qpair;
	nvmf_ctrlr_start_keep_alive_timer(ctrlr);
	ctrlr_add_qpair_and_update_rsp(qpair, ctrlr, rsp);
	_nvmf_request_complete(req);
}

static void
_nvmf_subsystem_add_ctrlr(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	if (nvmf_subsystem_add_ctrlr(ctrlr->subsys, ctrlr)) {
		SPDK_ERRLOG("Unable to add controller to subsystem\n");
		spdk_bit_array_free(&ctrlr->qpair_mask);
		free(ctrlr);
		qpair->ctrlr = NULL;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		spdk_nvmf_request_complete(req);
		return;
	}

	spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_add_admin_qpair, req);
}

static void
nvmf_ctrlr_cdata_init(struct spdk_nvmf_transport *transport, struct spdk_nvmf_subsystem *subsystem,
		      struct spdk_nvmf_ctrlr_data *cdata)
{
	cdata->kas = KAS_DEFAULT_VALUE;
	cdata->sgls.supported = 1;
	cdata->sgls.keyed_sgl = 1;
	cdata->sgls.sgl_offset = 1;
	cdata->nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
	cdata->nvmf_specific.ioccsz += transport->opts.in_capsule_data_size / 16;
	cdata->nvmf_specific.iorcsz = sizeof(struct spdk_nvme_cpl) / 16;
	cdata->nvmf_specific.icdoff = 0; /* offset starts directly after SQE */
	cdata->nvmf_specific.ctrattr.ctrlr_model = SPDK_NVMF_CTRLR_MODEL_DYNAMIC;
	cdata->nvmf_specific.msdbd = 1;

	if (transport->ops->cdata_init) {
		transport->ops->cdata_init(transport, subsystem, cdata);
	}
}

static struct spdk_nvmf_ctrlr *
nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
		  struct spdk_nvmf_request *req,
		  struct spdk_nvmf_fabric_connect_cmd *connect_cmd,
		  struct spdk_nvmf_fabric_connect_data *connect_data)
{
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_transport *transport;

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		SPDK_ERRLOG("Memory allocation failed\n");
		return NULL;
	}

	TAILQ_INIT(&ctrlr->log_head);
	ctrlr->subsys = subsystem;
	ctrlr->thread = req->qpair->group->thread;

	transport = req->qpair->transport;
	ctrlr->qpair_mask = spdk_bit_array_create(transport->opts.max_qpairs_per_ctrlr);
	if (!ctrlr->qpair_mask) {
		SPDK_ERRLOG("Failed to allocate controller qpair mask\n");
		free(ctrlr);
		return NULL;
	}

	nvmf_ctrlr_cdata_init(transport, subsystem, &ctrlr->cdata);

	/*
	 * KAS: This field indicates the granularity of the Keep Alive Timer in 100ms units.
	 * If this field is cleared to 0h, then Keep Alive is not supported.
	 */
	if (ctrlr->cdata.kas) {
		ctrlr->feat.keep_alive_timer.bits.kato = spdk_divide_round_up(connect_cmd->kato,
				KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS) *
				KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS;
	}

	ctrlr->feat.async_event_configuration.bits.ns_attr_notice = 1;
	if (ctrlr->subsys->flags.ana_reporting) {
		ctrlr->feat.async_event_configuration.bits.ana_change_notice = 1;
	}
	ctrlr->feat.volatile_write_cache.bits.wce = 1;

	if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
		/*
		 * If keep-alive timeout is not set, discovery controllers use some
		 * arbitrary high value in order to clean up stale discovery sessions
		 *
		 * From the 1.0a nvme-of spec:
		 * "The Keep Alive command is reserved for
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 *
		 * From the 1.1 nvme-of spec:
		 * "A host requests an explicit persistent connection
		 * to a Discovery controller and Asynchronous Event Notifications from
		 * the Discovery controller on that persistent connection by specifying
		 * a non-zero Keep Alive Timer value in the Connect command."
		 *
		 * In case non-zero KATO is used, we enable discovery_log_change_notice
		 * otherwise we disable it and use default discovery controller KATO.
		 * KATO is in milliseconds.
		 */
		if (ctrlr->feat.keep_alive_timer.bits.kato == 0) {
			ctrlr->feat.keep_alive_timer.bits.kato = NVMF_DISC_KATO_IN_MS;
			ctrlr->feat.async_event_configuration.bits.discovery_log_change_notice = 0;
		} else {
			ctrlr->feat.async_event_configuration.bits.discovery_log_change_notice = 1;
		}
	}

	/* Subtract 1 for admin queue, 1 for 0's based */
	ctrlr->feat.number_of_queues.bits.ncqr = transport->opts.max_qpairs_per_ctrlr - 1 - 1;
	ctrlr->feat.number_of_queues.bits.nsqr = transport->opts.max_qpairs_per_ctrlr - 1 - 1;

	spdk_uuid_copy(&ctrlr->hostid, (struct spdk_uuid *)connect_data->hostid);
	memcpy(ctrlr->hostnqn, connect_data->hostnqn, sizeof(ctrlr->hostnqn));

	ctrlr->vcprop.cap.raw = 0;
	ctrlr->vcprop.cap.bits.cqr = 1; /* NVMe-oF specification required */
	ctrlr->vcprop.cap.bits.mqes = transport->opts.max_queue_depth - 1; /* max queue depth */
	ctrlr->vcprop.cap.bits.ams = 0; /* optional arb mechanisms */
	ctrlr->vcprop.cap.bits.to = 1; /* ready timeout - 500 msec units */
	ctrlr->vcprop.cap.bits.dstrd = 0; /* fixed to 0 for NVMe-oF */
	ctrlr->vcprop.cap.bits.css = SPDK_NVME_CAP_CSS_NVM; /* NVM command set */
	ctrlr->vcprop.cap.bits.mpsmin = 0; /* 2 ^ (12 + mpsmin) == 4k */
	ctrlr->vcprop.cap.bits.mpsmax = 0; /* 2 ^ (12 + mpsmax) == 4k */

	/* Version Supported: 1.3 */
	ctrlr->vcprop.vs.bits.mjr = 1;
	ctrlr->vcprop.vs.bits.mnr = 3;
	ctrlr->vcprop.vs.bits.ter = 0;

	ctrlr->vcprop.cc.raw = 0;
	ctrlr->vcprop.cc.bits.en = 0; /* Init controller disabled */

	ctrlr->vcprop.csts.raw = 0;
	ctrlr->vcprop.csts.bits.rdy = 0; /* Init controller as not ready */

	SPDK_DEBUGLOG(nvmf, "cap 0x%" PRIx64 "\n", ctrlr->vcprop.cap.raw);
	SPDK_DEBUGLOG(nvmf, "vs 0x%x\n", ctrlr->vcprop.vs.raw);
	SPDK_DEBUGLOG(nvmf, "cc 0x%x\n", ctrlr->vcprop.cc.raw);
	SPDK_DEBUGLOG(nvmf, "csts 0x%x\n", ctrlr->vcprop.csts.raw);

	ctrlr->dif_insert_or_strip = transport->opts.dif_insert_or_strip;

	if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_NVME) {
		ctrlr->listener = nvmf_subsystem_find_listener(ctrlr->subsys, req->qpair->trid);
		if (!ctrlr->listener) {
			SPDK_ERRLOG("Listener was not found\n");
			free(ctrlr);
			return NULL;
		}
	}

	req->qpair->ctrlr = ctrlr;
	spdk_thread_send_msg(subsystem->thread, _nvmf_subsystem_add_ctrlr, req);

	return ctrlr;
}

static void
_nvmf_ctrlr_destruct(void *ctx)
{
	struct spdk_nvmf_ctrlr *ctrlr = ctx;
	struct spdk_nvmf_reservation_log *log, *log_tmp;

	nvmf_ctrlr_stop_keep_alive_timer(ctrlr);
	nvmf_ctrlr_stop_association_timer(ctrlr);
	spdk_bit_array_free(&ctrlr->qpair_mask);

	TAILQ_FOREACH_SAFE(log, &ctrlr->log_head, link, log_tmp) {
		TAILQ_REMOVE(&ctrlr->log_head, log, link);
		free(log);
	}
	free(ctrlr);
}

void
nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
{
	nvmf_subsystem_remove_ctrlr(ctrlr->subsys, ctrlr);

	spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_destruct, ctrlr);
}

static void
nvmf_ctrlr_add_io_qpair(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	/* Unit test will check qpair->ctrlr after calling spdk_nvmf_ctrlr_connect.
	 * For error case, the value should be NULL. So set it to NULL at first.
	 */
	qpair->ctrlr = NULL;

	/* Make sure the controller is not being destroyed. */
	if (ctrlr->in_destruct) {
		SPDK_ERRLOG("Got I/O connect while ctrlr was being destroyed.\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
		SPDK_ERRLOG("I/O connect not allowed on discovery controller\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	if (!ctrlr->vcprop.cc.bits.en) {
		SPDK_ERRLOG("Got I/O connect before ctrlr was enabled\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	if (1u << ctrlr->vcprop.cc.bits.iosqes != sizeof(struct spdk_nvme_cmd)) {
		SPDK_ERRLOG("Got I/O connect with invalid IOSQES %u\n",
			    ctrlr->vcprop.cc.bits.iosqes);
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	if (1u << ctrlr->vcprop.cc.bits.iocqes != sizeof(struct spdk_nvme_cpl)) {
		SPDK_ERRLOG("Got I/O connect with invalid IOCQES %u\n",
			    ctrlr->vcprop.cc.bits.iocqes);
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	ctrlr_add_qpair_and_update_rsp(qpair, ctrlr, rsp);
end:
	spdk_nvmf_request_complete(req);
}

static void
_nvmf_ctrlr_add_io_qpair(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_fabric_connect_data *data = req->data;
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_qpair *admin_qpair;
	struct spdk_nvmf_tgt *tgt = qpair->transport->tgt;
	struct spdk_nvmf_subsystem *subsystem;
	const struct spdk_nvmf_subsystem_listener *listener;

	SPDK_DEBUGLOG(nvmf, "Connect I/O Queue for controller id 0x%x\n", data->cntlid);

	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, data->subnqn);
	/* We already checked this in spdk_nvmf_ctrlr_connect */
	assert(subsystem != NULL);

	ctrlr = nvmf_subsystem_get_ctrlr(subsystem, data->cntlid);
	if (ctrlr == NULL) {
		SPDK_ERRLOG("Unknown controller ID 0x%x\n", data->cntlid);
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, cntlid);
		spdk_nvmf_request_complete(req);
		return;
	}

	/* fail before passing a message to the controller thread. */
	if (ctrlr->in_destruct) {
		SPDK_ERRLOG("Got I/O connect while ctrlr was being destroyed.\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		spdk_nvmf_request_complete(req);
		return;
	}

	/* If ANA reporting is enabled, check if I/O connect is on the same listener. */
	if (subsystem->flags.ana_reporting) {
		listener = nvmf_subsystem_find_listener(subsystem, qpair->trid);
		if (listener != ctrlr->listener) {
			SPDK_ERRLOG("I/O connect is on a listener different from admin connect\n");
			SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
			spdk_nvmf_request_complete(req);
			return;
		}
	}

	admin_qpair = ctrlr->admin_qpair;
	qpair->ctrlr = ctrlr;
	spdk_thread_send_msg(admin_qpair->group->thread, nvmf_ctrlr_add_io_qpair, req);
}

static bool
nvmf_qpair_access_allowed(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_subsystem *subsystem,
			  const char *hostnqn)
{
	struct spdk_nvme_transport_id listen_trid = {};

	if (!spdk_nvmf_subsystem_host_allowed(subsystem, hostnqn)) {
		SPDK_ERRLOG("Subsystem '%s' does not allow host '%s'\n", subsystem->subnqn, hostnqn);
		return false;
	}

	if (spdk_nvmf_qpair_get_listen_trid(qpair, &listen_trid)) {
		SPDK_ERRLOG("Subsystem '%s' is unable to enforce access control due to an internal error.\n",
			    subsystem->subnqn);
		return false;
	}

	if (!spdk_nvmf_subsystem_listener_allowed(subsystem, &listen_trid)) {
		SPDK_ERRLOG("Subsystem '%s' does not allow host '%s' to connect at this address.\n",
			    subsystem->subnqn, hostnqn);
		return false;
	}

	return true;
}

static int
_nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_data *data = req->data;
	struct spdk_nvmf_fabric_connect_cmd *cmd = &req->cmd->connect_cmd;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_transport *transport = qpair->transport;
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_subsystem *subsystem;

	SPDK_DEBUGLOG(nvmf, "recfmt 0x%x qid %u sqsize %u\n",
		      cmd->recfmt, cmd->qid, cmd->sqsize);

	SPDK_DEBUGLOG(nvmf, "Connect data:\n");
	SPDK_DEBUGLOG(nvmf, " cntlid: 0x%04x\n", data->cntlid);
	SPDK_DEBUGLOG(nvmf, " hostid: %08x-%04x-%04x-%02x%02x-%04x%08x ***\n",
		      ntohl(*(uint32_t *)&data->hostid[0]),
		      ntohs(*(uint16_t *)&data->hostid[4]),
		      ntohs(*(uint16_t *)&data->hostid[6]),
		      data->hostid[8],
		      data->hostid[9],
		      ntohs(*(uint16_t *)&data->hostid[10]),
		      ntohl(*(uint32_t *)&data->hostid[12]));
	SPDK_DEBUGLOG(nvmf, " subnqn: \"%s\"\n", data->subnqn);
	SPDK_DEBUGLOG(nvmf, " hostnqn: \"%s\"\n", data->hostnqn);

	subsystem = spdk_nvmf_tgt_find_subsystem(transport->tgt, data->subnqn);
	if (!subsystem) {
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (cmd->recfmt != 0) {
		SPDK_ERRLOG("Connect command unsupported RECFMT %u\n", cmd->recfmt);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/*
	 * SQSIZE is a 0-based value, so it must be at least 1 (minimum queue depth is 2) and
	 * strictly less than max_aq_depth (admin queues) or max_queue_depth (io queues).
	 */
	if (cmd->sqsize == 0) {
		SPDK_ERRLOG("Invalid SQSIZE = 0\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (cmd->qid == 0) {
		if (cmd->sqsize >= transport->opts.max_aq_depth) {
			SPDK_ERRLOG("Invalid SQSIZE for admin queue %u (min 1, max %u)\n",
				    cmd->sqsize, transport->opts.max_aq_depth - 1);
			SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
	} else if (cmd->sqsize >= transport->opts.max_queue_depth) {
		SPDK_ERRLOG("Invalid SQSIZE %u (min 1, max %u)\n",
			    cmd->sqsize, transport->opts.max_queue_depth - 1);
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	qpair->sq_head_max = cmd->sqsize;
	qpair->qid = cmd->qid;

	if (0 == qpair->qid) {
		qpair->group->stat.admin_qpairs++;
	} else {
		qpair->group->stat.io_qpairs++;
	}

	if (cmd->qid == 0) {
		SPDK_DEBUGLOG(nvmf, "Connect Admin Queue for controller ID 0x%x\n", data->cntlid);

		if (data->cntlid != 0xFFFF) {
			/* This NVMf target only supports dynamic mode. */
			SPDK_ERRLOG("The NVMf target only supports dynamic mode (CNTLID = 0x%x).\n", data->cntlid);
			SPDK_NVMF_INVALID_CONNECT_DATA(rsp, cntlid);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		/* Establish a new ctrlr */
		ctrlr = nvmf_ctrlr_create(subsystem, req, cmd, data);
		if (!ctrlr) {
			SPDK_ERRLOG("nvmf_ctrlr_create() failed\n");
			rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		} else {
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
	} else {
		spdk_thread_send_msg(subsystem->thread, _nvmf_ctrlr_add_io_qpair, req);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	}
}

static inline bool
nvmf_request_is_fabric_connect(struct spdk_nvmf_request *req)
{
	return req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC &&
	       req->cmd->nvmf_cmd.fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT;
}

static struct spdk_nvmf_subsystem_poll_group *
nvmf_subsystem_pg_from_connect_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_data *data;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_tgt *tgt;

	assert(nvmf_request_is_fabric_connect(req));
	assert(req->qpair->ctrlr == NULL);

	data = req->data;
	tgt = req->qpair->transport->tgt;

	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, data->subnqn);
	if (subsystem == NULL) {
		return NULL;
	}

	return &req->qpair->group->sgroups[subsystem->id];
}

int
spdk_nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	enum spdk_nvmf_request_exec_status status;

	sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
	if (!sgroup) {
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn);
		status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		goto out;
	}

	sgroup->io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);

	status = _nvmf_ctrlr_connect(req);

out:
	if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		_nvmf_request_complete(req);
	}

	return status;
}

static int
nvmf_ctrlr_cmd_connect(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_data *data = req->data;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_transport *transport = req->qpair->transport;
	struct spdk_nvmf_subsystem *subsystem;

	if (req->length < sizeof(struct spdk_nvmf_fabric_connect_data)) {
		SPDK_ERRLOG("Connect command data length 0x%x too small\n", req->length);
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	subsystem = spdk_nvmf_tgt_find_subsystem(transport->tgt, data->subnqn);
	if (!subsystem) {
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if ((subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE) ||
	    (subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSING) ||
	    (subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED) ||
	    (subsystem->state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING)) {
		SPDK_ERRLOG("Subsystem '%s' is not ready\n", subsystem->subnqn);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_CONTROLLER_BUSY;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* Ensure that hostnqn is null terminated */
	if (!memchr(data->hostnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1)) {
		SPDK_ERRLOG("Connect HOSTNQN is not null terminated\n");
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, hostnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (!nvmf_qpair_access_allowed(req->qpair, subsystem, data->hostnqn)) {
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_HOST;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return _nvmf_ctrlr_connect(req);
}

static int
nvmf_ctrlr_association_remove(void *ctx)
{
	struct spdk_nvmf_ctrlr *ctrlr = ctx;
	int rc;

	SPDK_DEBUGLOG(nvmf, "Disconnecting host from subsystem %s due to association timeout.\n",
		      ctrlr->subsys->subnqn);

	rc = spdk_nvmf_qpair_disconnect(ctrlr->admin_qpair, NULL, NULL);
	if (rc < 0) {
		SPDK_ERRLOG("Fail to disconnect admin ctrlr qpair\n");
		assert(false);
	}

	nvmf_ctrlr_stop_association_timer(ctrlr);
	return 1;
}

static void
nvmf_ctrlr_cc_shn_done(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_nvmf_ctrlr *ctrlr = spdk_io_channel_iter_get_ctx(i);

	if (status < 0) {
		SPDK_ERRLOG("Fail to disconnect io ctrlr qpairs\n");
		assert(false);
	}

	ctrlr->vcprop.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;

	/* After CC.EN transitions to 0 (due to shutdown or reset), the association
	 * between the host and controller shall be preserved for at least 2 minutes */
	if (ctrlr->association_timer) {
		SPDK_DEBUGLOG(nvmf, "Association timer already set\n");
		nvmf_ctrlr_stop_association_timer(ctrlr);
	}
	ctrlr->association_timer = SPDK_POLLER_REGISTER(nvmf_ctrlr_association_remove, ctrlr,
				   ctrlr->admin_qpair->transport->opts.association_timeout * 1000);
}

static void
nvmf_ctrlr_cc_reset_done(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_nvmf_ctrlr *ctrlr = spdk_io_channel_iter_get_ctx(i);

	if (status < 0) {
		SPDK_ERRLOG("Fail to disconnect io ctrlr qpairs\n");
		assert(false);
	}

	/* Only a subset of the registers are cleared out on a reset */
	ctrlr->vcprop.cc.raw = 0;
	ctrlr->vcprop.csts.raw = 0;

	/* After CC.EN transitions to 0 (due to shutdown or reset), the association
	 * between the host and controller shall be preserved for at least 2 minutes */
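	/* Re-arm the association timer, stopping any timer left over from an
	 * earlier reset or shutdown first. */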
	if (ctrlr->association_timer) {
		SPDK_DEBUGLOG(nvmf, "Association timer already set\n");
		nvmf_ctrlr_stop_association_timer(ctrlr);
	}
	ctrlr->association_timer = SPDK_POLLER_REGISTER(nvmf_ctrlr_association_remove, ctrlr,
				   ctrlr->admin_qpair->transport->opts.association_timeout * 1000);
}

const struct spdk_nvmf_registers *
spdk_nvmf_ctrlr_get_regs(struct spdk_nvmf_ctrlr *ctrlr)
{
	return &ctrlr->vcprop;
}

static uint64_t
nvmf_prop_get_cap(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.cap.raw;
}

static uint64_t
nvmf_prop_get_vs(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.vs.raw;
}

static uint64_t
nvmf_prop_get_cc(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.cc.raw;
}

static bool
nvmf_prop_set_cc(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	union spdk_nvme_cc_register cc, diff;

	cc.raw = value;

	SPDK_DEBUGLOG(nvmf, "cur CC: 0x%08x\n", ctrlr->vcprop.cc.raw);
	SPDK_DEBUGLOG(nvmf, "new CC: 0x%08x\n", cc.raw);

	/*
	 * Calculate which bits changed between the current and new CC.
	 * Mark each bit as 0 once it is handled to determine if any unhandled bits were changed.
	 */
	diff.raw = cc.raw ^ ctrlr->vcprop.cc.raw;

	if (diff.bits.en) {
		if (cc.bits.en) {
			SPDK_DEBUGLOG(nvmf, "Property Set CC Enable!\n");
			nvmf_ctrlr_stop_association_timer(ctrlr);

			ctrlr->vcprop.cc.bits.en = 1;
			ctrlr->vcprop.csts.bits.rdy = 1;
		} else {
			SPDK_DEBUGLOG(nvmf, "Property Set CC Disable!\n");
			ctrlr->vcprop.cc.bits.en = 0;
			spdk_for_each_channel(ctrlr->subsys->tgt,
					      nvmf_ctrlr_disconnect_io_qpairs_on_pg,
					      ctrlr,
					      nvmf_ctrlr_cc_reset_done);
		}
		diff.bits.en = 0;
	}

	if (diff.bits.shn) {
		if (cc.bits.shn == SPDK_NVME_SHN_NORMAL ||
		    cc.bits.shn == SPDK_NVME_SHN_ABRUPT) {
			SPDK_DEBUGLOG(nvmf, "Property Set CC Shutdown %u%ub!\n",
				      cc.bits.shn >> 1, cc.bits.shn & 1);
			ctrlr->vcprop.cc.bits.shn = cc.bits.shn;
			spdk_for_each_channel(ctrlr->subsys->tgt,
					      nvmf_ctrlr_disconnect_io_qpairs_on_pg,
					      ctrlr,
					      nvmf_ctrlr_cc_shn_done);

			/* From the time a shutdown is initiated the controller shall disable
			 * Keep Alive timer */
			nvmf_ctrlr_stop_keep_alive_timer(ctrlr);
		} else if (cc.bits.shn == 0) {
			ctrlr->vcprop.cc.bits.shn = 0;
		} else {
			SPDK_ERRLOG("Prop Set CC: Invalid SHN value %u%ub\n",
				    cc.bits.shn >> 1, cc.bits.shn & 1);
			return false;
		}
		diff.bits.shn = 0;
	}

	if (diff.bits.iosqes) {
		SPDK_DEBUGLOG(nvmf, "Prop Set IOSQES = %u (%u bytes)\n",
			      cc.bits.iosqes, 1u << cc.bits.iosqes);
		ctrlr->vcprop.cc.bits.iosqes = cc.bits.iosqes;
		diff.bits.iosqes = 0;
	}

	if (diff.bits.iocqes) {
		SPDK_DEBUGLOG(nvmf, "Prop Set IOCQES = %u (%u bytes)\n",
			      cc.bits.iocqes, 1u << cc.bits.iocqes);
		ctrlr->vcprop.cc.bits.iocqes = cc.bits.iocqes;
		diff.bits.iocqes = 0;
	}

	if (diff.bits.ams) {
		SPDK_ERRLOG("Arbitration Mechanism Selected (AMS) 0x%x not supported!\n", cc.bits.ams);
		return false;
	}

	if (diff.bits.mps) {
		SPDK_ERRLOG("Memory Page Size (MPS) %u KiB not supported!\n", (1 << (2 + cc.bits.mps)));
		return false;
	}

	if (diff.bits.css) {
		SPDK_ERRLOG("I/O Command Set Selected (CSS) 0x%x not supported!\n", cc.bits.css);
		return false;
	}

	if (diff.raw != 0) {
		/* Print an error message, but don't fail the command in this case.
		 * If we did want to fail in this case, we'd need to ensure we acted
		 * on no other bits or the initiator gets confused. */
		SPDK_ERRLOG("Prop Set CC toggled reserved bits 0x%x!\n", diff.raw);
	}

	return true;
}

static uint64_t
nvmf_prop_get_csts(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.csts.raw;
}

static uint64_t
nvmf_prop_get_aqa(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.aqa.raw;
}

static bool
nvmf_prop_set_aqa(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	union spdk_nvme_aqa_register aqa;

	aqa.raw = value;

	if (aqa.bits.asqs < SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES - 1 ||
	    aqa.bits.acqs < SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES - 1 ||
	    aqa.bits.reserved1 != 0 || aqa.bits.reserved2 != 0) {
		return false;
	}

	ctrlr->vcprop.aqa.raw = value;

	return true;
}

static uint64_t
nvmf_prop_get_asq(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.asq;
}

static bool
nvmf_prop_set_asq_lower(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	ctrlr->vcprop.asq = (ctrlr->vcprop.asq & (0xFFFFFFFFULL << 32ULL)) | value;

	return true;
}

static bool
nvmf_prop_set_asq_upper(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	ctrlr->vcprop.asq = (ctrlr->vcprop.asq & 0xFFFFFFFFULL) | ((uint64_t)value << 32ULL);

	return true;
}

static uint64_t
nvmf_prop_get_acq(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.acq;
}

static bool
nvmf_prop_set_acq_lower(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	ctrlr->vcprop.acq = (ctrlr->vcprop.acq & (0xFFFFFFFFULL << 32ULL)) | value;

	return true;
}

static bool
nvmf_prop_set_acq_upper(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	ctrlr->vcprop.acq = (ctrlr->vcprop.acq & 0xFFFFFFFFULL) | ((uint64_t)value << 32ULL);

	return true;
}

struct nvmf_prop {
	uint32_t ofst;
	uint8_t size;
	char name[11];
	uint64_t (*get_cb)(struct spdk_nvmf_ctrlr *ctrlr);
	bool (*set_cb)(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value);
	bool (*set_upper_cb)(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value);
};

#define PROP(field, size, get_cb, set_cb, set_upper_cb)	\
	{ \
		offsetof(struct spdk_nvme_registers, field), \
		size, \
		#field, \
		get_cb, set_cb, set_upper_cb \
	}

static const struct nvmf_prop nvmf_props[] = {
	PROP(cap, 8, nvmf_prop_get_cap, NULL, NULL),
	PROP(vs, 4, nvmf_prop_get_vs, NULL, NULL),
	PROP(cc, 4, nvmf_prop_get_cc, nvmf_prop_set_cc, NULL),
	PROP(csts, 4, nvmf_prop_get_csts, NULL, NULL),
	PROP(aqa, 4, nvmf_prop_get_aqa, nvmf_prop_set_aqa, NULL),
	PROP(asq, 8, nvmf_prop_get_asq, nvmf_prop_set_asq_lower, nvmf_prop_set_asq_upper),
	PROP(acq, 8, nvmf_prop_get_acq, nvmf_prop_set_acq_lower, nvmf_prop_set_acq_upper),
};

static const struct nvmf_prop *
find_prop(uint32_t ofst, uint8_t size)
{
	size_t i;

	for (i = 0; i < SPDK_COUNTOF(nvmf_props); i++) {
		const struct nvmf_prop *prop = &nvmf_props[i];

		if ((ofst >= prop->ofst) && (ofst + size <= prop->ofst + prop->size)) {
			return prop;
		}
	}

	return NULL;
}

static int
nvmf_property_get(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_fabric_prop_get_cmd *cmd = &req->cmd->prop_get_cmd;
	struct spdk_nvmf_fabric_prop_get_rsp *response = &req->rsp->prop_get_rsp;
	const struct nvmf_prop *prop;
	uint8_t size;

	response->status.sc = 0;
	response->value.u64 = 0;

	SPDK_DEBUGLOG(nvmf, "size %d, offset 0x%x\n",
		      cmd->attrib.size, cmd->ofst);

	switch (cmd->attrib.size) {
	case SPDK_NVMF_PROP_SIZE_4:
		size = 4;
		break;
	case SPDK_NVMF_PROP_SIZE_8:
		size = 8;
		break;
	default:
		SPDK_ERRLOG("Invalid size value %d\n", cmd->attrib.size);
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	prop = find_prop(cmd->ofst, size);
	if (prop == NULL || prop->get_cb == NULL) {
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	SPDK_DEBUGLOG(nvmf, "name: %s\n", prop->name);

	response->value.u64 = prop->get_cb(ctrlr);

	SPDK_DEBUGLOG(nvmf, "response value: 0x%" PRIx64 "\n", response->value.u64);

	if (size != prop->size) {
		/* The size must be 4 and the prop->size is 8. Figure out which part of the property to read. */
		assert(size == 4);
		assert(prop->size == 8);

		if (cmd->ofst == prop->ofst) {
			/* Keep bottom 4 bytes only */
			response->value.u64 &= 0xFFFFFFFF;
		} else {
			/* Keep top 4 bytes only */
			response->value.u64 >>= 32;
		}
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_property_set(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_fabric_prop_set_cmd *cmd = &req->cmd->prop_set_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	const struct nvmf_prop *prop;
	uint64_t value;
	uint8_t size;
	bool ret;

	SPDK_DEBUGLOG(nvmf, "size %d, offset 0x%x, value 0x%" PRIx64 "\n",
		      cmd->attrib.size, cmd->ofst, cmd->value.u64);

	switch (cmd->attrib.size) {
	case SPDK_NVMF_PROP_SIZE_4:
		size = 4;
		break;
	case SPDK_NVMF_PROP_SIZE_8:
		size = 8;
		break;
	default:
		SPDK_ERRLOG("Invalid size value %d\n", cmd->attrib.size);
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	prop = find_prop(cmd->ofst, size);
	if (prop == NULL || prop->set_cb == NULL) {
		SPDK_ERRLOG("Invalid offset 0x%x\n", cmd->ofst);
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	SPDK_DEBUGLOG(nvmf, "name: %s\n", prop->name);

	value = cmd->value.u64;

	if (prop->size == 4) {
		ret = prop->set_cb(ctrlr, (uint32_t)value);
	} else if (size != prop->size) {
		/* The size must be 4 and the prop->size is 8. Figure out which part of the property to write. */
		assert(size == 4);
		assert(prop->size == 8);

		if (cmd->ofst == prop->ofst) {
			ret = prop->set_cb(ctrlr, (uint32_t)value);
		} else {
			ret = prop->set_upper_cb(ctrlr, (uint32_t)value);
		}
	} else {
		ret = prop->set_cb(ctrlr, (uint32_t)value);
		if (ret) {
			ret = prop->set_upper_cb(ctrlr, (uint32_t)(value >> 32));
		}
	}

	if (!ret) {
		SPDK_ERRLOG("prop set_cb failed\n");
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_arbitration(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

	SPDK_DEBUGLOG(nvmf, "Set Features - Arbitration (cdw11 = 0x%0x)\n", cmd->cdw11);

	ctrlr->feat.arbitration.raw = cmd->cdw11;
	ctrlr->feat.arbitration.bits.reserved = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_power_management(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(nvmf, "Set Features - Power Management (cdw11 = 0x%0x)\n", cmd->cdw11);

	/* Only PS = 0 is allowed, since we report NPSS = 0 */
	if (cmd->cdw11_bits.feat_power_management.bits.ps != 0) {
		SPDK_ERRLOG("Invalid power state %u\n", cmd->cdw11_bits.feat_power_management.bits.ps);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ctrlr->feat.power_management.raw = cmd->cdw11;
	ctrlr->feat.power_management.bits.reserved = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static bool
temp_threshold_opts_valid(const union spdk_nvme_feat_temperature_threshold *opts)
{
	/*
	 * Valid TMPSEL values:
	 *  0000b - 1000b: temperature sensors
	 *  1111b: set all implemented temperature sensors
	 */
	if (opts->bits.tmpsel >= 9 && opts->bits.tmpsel != 15) {
		/* 1001b - 1110b: reserved */
		SPDK_ERRLOG("Invalid TMPSEL %u\n", opts->bits.tmpsel);
		return false;
	}

	/*
	 * Valid THSEL values:
	 *  00b: over temperature threshold
	 *  01b: under temperature threshold
	 */
	if (opts->bits.thsel > 1) {
		/* 10b - 11b: reserved */
		SPDK_ERRLOG("Invalid THSEL %u\n", opts->bits.thsel);
		return false;
	}

	return true;
}

static int
nvmf_ctrlr_set_features_temperature_threshold(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(nvmf, "Set Features - Temperature Threshold (cdw11 = 0x%0x)\n", cmd->cdw11);

	if (!temp_threshold_opts_valid(&cmd->cdw11_bits.feat_temp_threshold)) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* TODO: no sensors implemented - ignore new values */
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_get_features_temperature_threshold(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(nvmf, "Get Features - Temperature Threshold (cdw11 = 0x%0x)\n", cmd->cdw11);

	if (!temp_threshold_opts_valid(&cmd->cdw11_bits.feat_temp_threshold)) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* TODO: no sensors implemented - return 0 for all thresholds */
	rsp->cdw0 = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_error_recovery(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(nvmf, "Set Features - Error Recovery (cdw11 = 0x%0x)\n", cmd->cdw11);

	if (cmd->cdw11_bits.feat_error_recovery.bits.dulbe) {
		/*
		 * Host is not allowed to set this bit, since we don't advertise it in
		 * Identify Namespace.
		 */
		SPDK_ERRLOG("Host set unsupported DULBE bit\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ctrlr->feat.error_recovery.raw = cmd->cdw11;
	ctrlr->feat.error_recovery.bits.reserved = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_volatile_write_cache(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

	SPDK_DEBUGLOG(nvmf, "Set Features - Volatile Write Cache (cdw11 = 0x%0x)\n", cmd->cdw11);

	ctrlr->feat.volatile_write_cache.raw = cmd->cdw11;
	ctrlr->feat.volatile_write_cache.bits.reserved = 0;

	SPDK_DEBUGLOG(nvmf, "Set Features - Volatile Write Cache %s\n",
		      ctrlr->feat.volatile_write_cache.bits.wce ? "Enabled" : "Disabled");
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_write_atomicity(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

	SPDK_DEBUGLOG(nvmf, "Set Features - Write Atomicity (cdw11 = 0x%0x)\n", cmd->cdw11);

	ctrlr->feat.write_atomicity.raw = cmd->cdw11;
	ctrlr->feat.write_atomicity.bits.reserved = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_host_identifier(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	SPDK_ERRLOG("Set Features - Host Identifier not allowed\n");
	response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_get_features_host_identifier(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(nvmf, "Get Features - Host Identifier\n");

	if (!cmd->cdw11_bits.feat_host_identifier.bits.exhid) {
		/* NVMe over Fabrics requires EXHID=1 (128-bit/16-byte host ID) */
		SPDK_ERRLOG("Get Features - Host Identifier with EXHID=0 not allowed\n");
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->data == NULL || req->length < sizeof(ctrlr->hostid)) {
		SPDK_ERRLOG("Invalid data buffer for Get Features - Host Identifier\n");
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	spdk_uuid_copy((struct spdk_uuid *)req->data, &ctrlr->hostid);
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_get_features_reservation_notification_mask(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_ns *ns;

	SPDK_DEBUGLOG(nvmf, "Get Features - Reservation Notification Mask\n");

	if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) {
		SPDK_ERRLOG("Get Features - Invalid Namespace ID\n");
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
	if (ns == NULL) {
		SPDK_ERRLOG("Set Features - Invalid Namespace ID\n");
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	rsp->cdw0 = ns->mask;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_reservation_notification_mask(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_ns *ns;

	SPDK_DEBUGLOG(nvmf, "Set Features - Reservation Notification Mask\n");

	if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) {
		for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
		     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
			ns->mask = cmd->cdw11;
		}
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
	if (ns == NULL) {
		SPDK_ERRLOG("Set Features - Invalid Namespace ID\n");
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	ns->mask = cmd->cdw11;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_get_features_reservation_persistence(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvmf_ns *ns;

	SPDK_DEBUGLOG(nvmf, "Get Features - Reservation Persistence\n");

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
	/* NSID with SPDK_NVME_GLOBAL_NS_TAG (=0xffffffff) also included */
	if (ns == NULL) {
		SPDK_ERRLOG("Get Features - Invalid Namespace ID\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	response->cdw0 = ns->ptpl_activated;

	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_reservation_persistence(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvmf_ns *ns;
	bool ptpl;

	SPDK_DEBUGLOG(nvmf, "Set Features - Reservation Persistence\n");

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
	ptpl = cmd->cdw11_bits.feat_rsv_persistence.bits.ptpl;

	if (cmd->nsid != SPDK_NVME_GLOBAL_NS_TAG && ns && ns->ptpl_file) {
		ns->ptpl_activated = ptpl;
	} else if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) {
		for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns && ns->ptpl_file;
		     ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) {
			ns->ptpl_activated = ptpl;
		}
	} else {
		SPDK_ERRLOG("Set Features - Invalid Namespace ID or Reservation Configuration\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* TODO: Feature not changeable for now */
	response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	response->status.sc = SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_keep_alive_timer(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(nvmf, "Set Features - Keep Alive Timer (%u ms)\n", cmd->cdw11);

	/*
	 * If the host attempts to disable keep alive by setting KATO to 0h,
	 * a status value of Keep Alive Invalid shall be returned.
	 */
	if (cmd->cdw11_bits.feat_keep_alive_timer.bits.kato == 0) {
		rsp->status.sc = SPDK_NVME_SC_KEEP_ALIVE_INVALID;
	} else if (cmd->cdw11_bits.feat_keep_alive_timer.bits.kato < MIN_KEEP_ALIVE_TIMEOUT_IN_MS) {
		ctrlr->feat.keep_alive_timer.bits.kato = MIN_KEEP_ALIVE_TIMEOUT_IN_MS;
	} else {
		/* round up to the granularity advertised in KAS */
		ctrlr->feat.keep_alive_timer.bits.kato = spdk_divide_round_up(
					cmd->cdw11_bits.feat_keep_alive_timer.bits.kato,
					KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS) *
				KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS;
	}

	/*
	 * If the keep alive timeout value was changed successfully,
	 * update the keep alive poller.
	 */
	if (cmd->cdw11_bits.feat_keep_alive_timer.bits.kato != 0) {
		if (ctrlr->keep_alive_poller != NULL) {
			spdk_poller_unregister(&ctrlr->keep_alive_poller);
		}
		ctrlr->keep_alive_poller = SPDK_POLLER_REGISTER(nvmf_ctrlr_keep_alive_poll, ctrlr,
					   ctrlr->feat.keep_alive_timer.bits.kato * 1000);
	}

	SPDK_DEBUGLOG(nvmf, "Set Features - Keep Alive Timer set to %u ms\n",
		      ctrlr->feat.keep_alive_timer.bits.kato);

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_number_of_queues(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint32_t count;

	SPDK_DEBUGLOG(nvmf, "Set Features - Number of Queues, cdw11 0x%x\n",
		      req->cmd->nvme_cmd.cdw11);

	count = spdk_bit_array_count_set(ctrlr->qpair_mask);
	/* verify that the controller is ready to process commands */
	if (count > 1) {
		SPDK_DEBUGLOG(nvmf, "Queue pairs already active!\n");
		rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
	} else {
		/*
		 * Ignore the value requested by the host -
		 * always return the pre-configured value based on max_qpairs_allowed.
		 */
		rsp->cdw0 = ctrlr->feat.number_of_queues.raw;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_async_event_configuration(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

	SPDK_DEBUGLOG(nvmf, "Set Features - Async Event Configuration, cdw11 0x%08x\n",
		      cmd->cdw11);
	ctrlr->feat.async_event_configuration.raw = cmd->cdw11;
	ctrlr->feat.async_event_configuration.bits.reserved = 0;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_async_event_request(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_subsystem_poll_group *sgroup;

	SPDK_DEBUGLOG(nvmf, "Async Event Request\n");

	/* AER cmd is an exception */
	sgroup = &req->qpair->group->sgroups[ctrlr->subsys->id];
	assert(sgroup != NULL);
	sgroup->io_outstanding--;

	/* Four asynchronous events are supported for now */
	if (ctrlr->nr_aer_reqs >= NVMF_MAX_ASYNC_EVENTS) {
		SPDK_DEBUGLOG(nvmf, "AERL exceeded\n");
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (ctrlr->notice_event.bits.async_event_type ==
	    SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) {
		rsp->cdw0 = ctrlr->notice_event.raw;
		ctrlr->notice_event.raw = 0;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (ctrlr->reservation_event.bits.async_event_type ==
	    SPDK_NVME_ASYNC_EVENT_TYPE_IO) {
		rsp->cdw0 = ctrlr->reservation_event.raw;
		ctrlr->reservation_event.raw = 0;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ctrlr->aer_req[ctrlr->nr_aer_reqs++] = req;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

static void
nvmf_get_firmware_slot_log_page(void *buffer, uint64_t offset, uint32_t length)
{
	struct spdk_nvme_firmware_page fw_page;
	size_t copy_len;

	memset(&fw_page, 0, sizeof(fw_page));
	fw_page.afi.active_slot = 1;
	fw_page.afi.next_reset_slot = 0;
	spdk_strcpy_pad(fw_page.revision[0], FW_VERSION, sizeof(fw_page.revision[0]), ' ');

	if (offset < sizeof(fw_page)) {
		copy_len = spdk_min(sizeof(fw_page) - offset, length);
		if (copy_len > 0) {
			memcpy(buffer, (const char *)&fw_page + offset, copy_len);
		}
	}
}

#define SPDK_NVMF_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
static void
nvmf_get_ana_log_page(struct spdk_nvmf_ctrlr *ctrlr, void *data,
		      uint64_t offset, uint32_t length)
{
	char *buf = data;
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[SPDK_NVMF_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	size_t copy_len;
	uint32_t num_ns = 0;
	struct spdk_nvmf_ns *ns;

	if (length == 0) {
		return;
	}

	if (offset >= sizeof(ana_hdr)) {
		offset -= sizeof(ana_hdr);
	} else {
		for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns != NULL;
		     ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) {
			num_ns++;
		}

		memset(&ana_hdr, 0, sizeof(ana_hdr));

		ana_hdr.num_ana_group_desc = num_ns;
		/* TODO: Support Change Count. */
		ana_hdr.change_count = 0;

		copy_len = spdk_min(sizeof(ana_hdr) - offset, length);
		memcpy(buf, (const char *)&ana_hdr + offset, copy_len);
		length -= copy_len;
		buf += copy_len;
		offset = 0;
	}

	if (length == 0) {
		return;
	}

	ana_desc = (void *)_ana_desc;

	for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) {
		if (offset >= SPDK_NVMF_ANA_DESC_SIZE) {
			offset -= SPDK_NVMF_ANA_DESC_SIZE;
			continue;
		}

		memset(ana_desc, 0, SPDK_NVMF_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->nsid;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ctrlr->listener->ana_state;
		ana_desc->nsid[0] = ns->nsid;
		/* TODO: Support Change Count. */
		ana_desc->change_count = 0;

		copy_len = spdk_min(SPDK_NVMF_ANA_DESC_SIZE - offset, length);
		memcpy(buf, (const char *)ana_desc + offset, copy_len);
		length -= copy_len;
		buf += copy_len;
		offset = 0;

		if (length == 0) {
			return;
		}
	}
}

void
nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
{
	uint16_t max_changes = SPDK_COUNTOF(ctrlr->changed_ns_list.ns_list);
	uint16_t i;
	bool found = false;

	for (i = 0; i < ctrlr->changed_ns_list_count; i++) {
		if (ctrlr->changed_ns_list.ns_list[i] == nsid) {
			/* nsid is already in the list */
			found = true;
			break;
		}
	}

	if (!found) {
		if (ctrlr->changed_ns_list_count == max_changes) {
			/* Out of space - set first entry to FFFFFFFFh and zero-fill the rest. */
*/ 1771 ctrlr->changed_ns_list.ns_list[0] = 0xFFFFFFFFu; 1772 for (i = 1; i < max_changes; i++) { 1773 ctrlr->changed_ns_list.ns_list[i] = 0; 1774 } 1775 } else { 1776 ctrlr->changed_ns_list.ns_list[ctrlr->changed_ns_list_count++] = nsid; 1777 } 1778 } 1779 } 1780 1781 static void 1782 nvmf_get_changed_ns_list_log_page(struct spdk_nvmf_ctrlr *ctrlr, 1783 void *buffer, uint64_t offset, uint32_t length) 1784 { 1785 size_t copy_length; 1786 1787 if (offset < sizeof(ctrlr->changed_ns_list)) { 1788 copy_length = spdk_min(length, sizeof(ctrlr->changed_ns_list) - offset); 1789 if (copy_length) { 1790 memcpy(buffer, (char *)&ctrlr->changed_ns_list + offset, copy_length); 1791 } 1792 } 1793 1794 /* Clear log page each time it is read */ 1795 ctrlr->changed_ns_list_count = 0; 1796 memset(&ctrlr->changed_ns_list, 0, sizeof(ctrlr->changed_ns_list)); 1797 } 1798 1799 /* The structure can be modified if we provide support for other commands in future */ 1800 static const struct spdk_nvme_cmds_and_effect_log_page g_cmds_and_effect_log_page = { 1801 .admin_cmds_supported = { 1802 /* CSUPP, LBCC, NCC, NIC, CCC, CSE */ 1803 /* Get Log Page */ 1804 [SPDK_NVME_OPC_GET_LOG_PAGE] = {1, 0, 0, 0, 0, 0, 0, 0}, 1805 /* Identify */ 1806 [SPDK_NVME_OPC_IDENTIFY] = {1, 0, 0, 0, 0, 0, 0, 0}, 1807 /* Abort */ 1808 [SPDK_NVME_OPC_ABORT] = {1, 0, 0, 0, 0, 0, 0, 0}, 1809 /* Set Features */ 1810 [SPDK_NVME_OPC_SET_FEATURES] = {1, 0, 0, 0, 0, 0, 0, 0}, 1811 /* Get Features */ 1812 [SPDK_NVME_OPC_GET_FEATURES] = {1, 0, 0, 0, 0, 0, 0, 0}, 1813 /* Async Event Request */ 1814 [SPDK_NVME_OPC_ASYNC_EVENT_REQUEST] = {1, 0, 0, 0, 0, 0, 0, 0}, 1815 /* Keep Alive */ 1816 [SPDK_NVME_OPC_KEEP_ALIVE] = {1, 0, 0, 0, 0, 0, 0, 0}, 1817 }, 1818 .io_cmds_supported = { 1819 /* FLUSH */ 1820 [SPDK_NVME_OPC_FLUSH] = {1, 1, 0, 0, 0, 0, 0, 0}, 1821 /* WRITE */ 1822 [SPDK_NVME_OPC_WRITE] = {1, 1, 0, 0, 0, 0, 0, 0}, 1823 /* READ */ 1824 [SPDK_NVME_OPC_READ] = {1, 0, 0, 0, 0, 0, 0, 0}, 1825 /* WRITE ZEROES */ 1826 [SPDK_NVME_OPC_WRITE_ZEROES] = {1, 1, 0, 0, 0, 0, 0, 0}, 1827 /* DATASET MANAGEMENT */ 1828 [SPDK_NVME_OPC_DATASET_MANAGEMENT] = {1, 1, 0, 0, 0, 0, 0, 0}, 1829 /* COMPARE */ 1830 [SPDK_NVME_OPC_COMPARE] = {1, 0, 0, 0, 0, 0, 0, 0}, 1831 }, 1832 }; 1833 1834 static void 1835 nvmf_get_cmds_and_effects_log_page(void *buffer, 1836 uint64_t offset, uint32_t length) 1837 { 1838 uint32_t page_size = sizeof(struct spdk_nvme_cmds_and_effect_log_page); 1839 size_t copy_len = 0; 1840 size_t zero_len = length; 1841 1842 if (offset < page_size) { 1843 copy_len = spdk_min(page_size - offset, length); 1844 zero_len -= copy_len; 1845 memcpy(buffer, (char *)(&g_cmds_and_effect_log_page) + offset, copy_len); 1846 } 1847 1848 if (zero_len) { 1849 memset((char *)buffer + copy_len, 0, zero_len); 1850 } 1851 } 1852 1853 static void 1854 nvmf_get_reservation_notification_log_page(struct spdk_nvmf_ctrlr *ctrlr, 1855 void *data, uint64_t offset, uint32_t length) 1856 { 1857 uint32_t unit_log_len, avail_log_len, next_pos, copy_len; 1858 struct spdk_nvmf_reservation_log *log, *log_tmp; 1859 uint8_t *buf = data; 1860 1861 unit_log_len = sizeof(struct spdk_nvme_reservation_notification_log); 1862 /* No available log, return 1 zeroed log page */ 1863 if (!ctrlr->num_avail_log_pages) { 1864 memset(buf, 0, spdk_min(length, unit_log_len)); 1865 return; 1866 } 1867 1868 avail_log_len = ctrlr->num_avail_log_pages * unit_log_len; 1869 if (offset >= avail_log_len) { 1870 return; 1871 } 1872 1873 next_pos = 0; 1874 TAILQ_FOREACH_SAFE(log, &ctrlr->log_head, link, log_tmp) { 1875 
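/*
 * Copy out the slice of the queued reservation notification log pages that
 * falls inside [offset, offset + length). Entries are removed and freed as
 * they are walked, so each log page is reported to the host at most once.
 * For example, if each entry is 64 bytes, offset == 64 and length == 128
 * skip the first queued entry and return the second and third.
 */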
TAILQ_REMOVE(&ctrlr->log_head, log, link); 1876 ctrlr->num_avail_log_pages--; 1877 1878 next_pos += unit_log_len; 1879 if (next_pos > offset) { 1880 copy_len = spdk_min(next_pos - offset, length); 1881 memcpy(buf, &log->log, copy_len); 1882 length -= copy_len; 1883 offset += copy_len; 1884 buf += copy_len; 1885 } 1886 free(log); 1887 1888 if (length == 0) { 1889 break; 1890 } 1891 } 1892 return; 1893 } 1894 1895 static int 1896 nvmf_ctrlr_get_log_page(struct spdk_nvmf_request *req) 1897 { 1898 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1899 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 1900 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1901 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 1902 uint64_t offset, len; 1903 uint32_t numdl, numdu; 1904 uint8_t lid; 1905 1906 if (req->data == NULL) { 1907 SPDK_ERRLOG("get log command with no buffer\n"); 1908 response->status.sct = SPDK_NVME_SCT_GENERIC; 1909 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1910 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1911 } 1912 1913 offset = (uint64_t)cmd->cdw12 | ((uint64_t)cmd->cdw13 << 32); 1914 if (offset & 3) { 1915 SPDK_ERRLOG("Invalid log page offset 0x%" PRIx64 "\n", offset); 1916 response->status.sct = SPDK_NVME_SCT_GENERIC; 1917 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1918 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1919 } 1920 1921 numdl = cmd->cdw10_bits.get_log_page.numdl; 1922 numdu = cmd->cdw11_bits.get_log_page.numdu; 1923 len = ((numdu << 16) + numdl + (uint64_t)1) * 4; 1924 if (len > req->length) { 1925 SPDK_ERRLOG("Get log page: len (%" PRIu64 ") > buf size (%u)\n", 1926 len, req->length); 1927 response->status.sct = SPDK_NVME_SCT_GENERIC; 1928 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1929 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1930 } 1931 1932 lid = cmd->cdw10_bits.get_log_page.lid; 1933 SPDK_DEBUGLOG(nvmf, "Get log page: LID=0x%02X offset=0x%" PRIx64 " len=0x%" PRIx64 "\n", 1934 lid, offset, len); 1935 1936 if (subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) { 1937 switch (lid) { 1938 case SPDK_NVME_LOG_DISCOVERY: 1939 nvmf_get_discovery_log_page(subsystem->tgt, ctrlr->hostnqn, req->iov, req->iovcnt, offset, 1940 len); 1941 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1942 default: 1943 goto invalid_log_page; 1944 } 1945 } else { 1946 switch (lid) { 1947 case SPDK_NVME_LOG_ERROR: 1948 case SPDK_NVME_LOG_HEALTH_INFORMATION: 1949 /* TODO: actually fill out log page data */ 1950 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1951 case SPDK_NVME_LOG_FIRMWARE_SLOT: 1952 nvmf_get_firmware_slot_log_page(req->data, offset, len); 1953 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1954 case SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS: 1955 if (subsystem->flags.ana_reporting) { 1956 nvmf_get_ana_log_page(ctrlr, req->data, offset, len); 1957 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1958 } else { 1959 goto invalid_log_page; 1960 } 1961 case SPDK_NVME_LOG_COMMAND_EFFECTS_LOG: 1962 nvmf_get_cmds_and_effects_log_page(req->data, offset, len); 1963 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1964 case SPDK_NVME_LOG_CHANGED_NS_LIST: 1965 nvmf_get_changed_ns_list_log_page(ctrlr, req->data, offset, len); 1966 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1967 case SPDK_NVME_LOG_RESERVATION_NOTIFICATION: 1968 nvmf_get_reservation_notification_log_page(ctrlr, req->data, offset, len); 1969 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1970 default: 1971 goto invalid_log_page; 1972 } 1973 } 1974 1975 invalid_log_page: 1976 
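/* Any LID not recognized above, including the ANA log when ana_reporting is disabled, is failed with Invalid Field in Command. */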
SPDK_ERRLOG("Unsupported Get Log Page 0x%02X\n", lid); 1977 response->status.sct = SPDK_NVME_SCT_GENERIC; 1978 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1979 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1980 } 1981 1982 int 1983 spdk_nvmf_ctrlr_identify_ns(struct spdk_nvmf_ctrlr *ctrlr, 1984 struct spdk_nvme_cmd *cmd, 1985 struct spdk_nvme_cpl *rsp, 1986 struct spdk_nvme_ns_data *nsdata) 1987 { 1988 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 1989 struct spdk_nvmf_ns *ns; 1990 uint32_t max_num_blocks; 1991 1992 if (cmd->nsid == 0 || cmd->nsid > subsystem->max_nsid) { 1993 SPDK_ERRLOG("Identify Namespace for invalid NSID %u\n", cmd->nsid); 1994 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 1995 rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT; 1996 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1997 } 1998 1999 ns = _nvmf_subsystem_get_ns(subsystem, cmd->nsid); 2000 if (ns == NULL || ns->bdev == NULL) { 2001 /* 2002 * Inactive namespaces should return a zero filled data structure. 2003 * The data buffer is already zeroed by nvmf_ctrlr_process_admin_cmd(), 2004 * so we can just return early here. 2005 */ 2006 SPDK_DEBUGLOG(nvmf, "Identify Namespace for inactive NSID %u\n", cmd->nsid); 2007 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 2008 rsp->status.sc = SPDK_NVME_SC_SUCCESS; 2009 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2010 } 2011 2012 nvmf_bdev_ctrlr_identify_ns(ns, nsdata, ctrlr->dif_insert_or_strip); 2013 2014 /* Due to bug in the Linux kernel NVMe driver we have to set noiob no larger than mdts */ 2015 max_num_blocks = ctrlr->admin_qpair->transport->opts.max_io_size / 2016 (1U << nsdata->lbaf[nsdata->flbas.format].lbads); 2017 if (nsdata->noiob > max_num_blocks) { 2018 nsdata->noiob = max_num_blocks; 2019 } 2020 2021 if (subsystem->flags.ana_reporting) { 2022 /* ANA group ID matches NSID. 
*/ 2023 nsdata->anagrpid = ns->nsid; 2024 2025 if (ctrlr->listener->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE || 2026 ctrlr->listener->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE) { 2027 nsdata->nuse = 0; 2028 } 2029 } 2030 2031 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2032 } 2033 2034 static void 2035 nvmf_ctrlr_populate_oacs(struct spdk_nvmf_ctrlr *ctrlr, 2036 struct spdk_nvme_ctrlr_data *cdata) 2037 { 2038 cdata->oacs.virtualization_management = 2039 g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_VIRTUALIZATION_MANAGEMENT].hdlr != NULL; 2040 cdata->oacs.nvme_mi = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NVME_MI_SEND].hdlr != NULL 2041 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NVME_MI_RECEIVE].hdlr != NULL; 2042 cdata->oacs.directives = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_DIRECTIVE_SEND].hdlr != NULL 2043 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_DIRECTIVE_RECEIVE].hdlr != NULL; 2044 cdata->oacs.device_self_test = 2045 g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_DEVICE_SELF_TEST].hdlr != NULL; 2046 cdata->oacs.ns_manage = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NS_MANAGEMENT].hdlr != NULL 2047 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NS_ATTACHMENT].hdlr != NULL; 2048 cdata->oacs.firmware = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD].hdlr != 2049 NULL 2050 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_FIRMWARE_COMMIT].hdlr != NULL; 2051 cdata->oacs.format = 2052 g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_FORMAT_NVM].hdlr != NULL; 2053 cdata->oacs.security = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_SECURITY_SEND].hdlr != NULL 2054 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_SECURITY_RECEIVE].hdlr != NULL; 2055 cdata->oacs.get_lba_status = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_GET_LBA_STATUS].hdlr != 2056 NULL; 2057 } 2058 2059 int 2060 spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_ctrlr_data *cdata) 2061 { 2062 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 2063 struct spdk_nvmf_transport *transport = ctrlr->admin_qpair->transport; 2064 2065 /* 2066 * Common fields for discovery and NVM subsystems 2067 */ 2068 spdk_strcpy_pad(cdata->fr, FW_VERSION, sizeof(cdata->fr), ' '); 2069 assert((transport->opts.max_io_size % 4096) == 0); 2070 cdata->mdts = spdk_u32log2(transport->opts.max_io_size / 4096); 2071 cdata->cntlid = ctrlr->cntlid; 2072 cdata->ver = ctrlr->vcprop.vs; 2073 cdata->aerl = NVMF_MAX_ASYNC_EVENTS - 1; 2074 cdata->lpa.edlp = 1; 2075 cdata->elpe = 127; 2076 cdata->maxcmd = transport->opts.max_queue_depth; 2077 cdata->sgls = ctrlr->cdata.sgls; 2078 cdata->fuses.compare_and_write = 1; 2079 cdata->acwu = 1; 2080 if (subsystem->flags.ana_reporting) { 2081 cdata->mnan = subsystem->max_nsid; 2082 } 2083 spdk_strcpy_pad(cdata->subnqn, subsystem->subnqn, sizeof(cdata->subnqn), '\0'); 2084 2085 SPDK_DEBUGLOG(nvmf, "ctrlr data: maxcmd 0x%x\n", cdata->maxcmd); 2086 SPDK_DEBUGLOG(nvmf, "sgls data: 0x%x\n", from_le32(&cdata->sgls)); 2087 2088 2089 if (subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) { 2090 /* 2091 * NVM Discovery subsystem fields 2092 */ 2093 cdata->oaes.discovery_log_change_notices = 1; 2094 } else { 2095 /* 2096 * NVM subsystem fields (reserved for discovery subsystems) 2097 */ 2098 spdk_strcpy_pad(cdata->mn, spdk_nvmf_subsystem_get_mn(subsystem), sizeof(cdata->mn), ' '); 2099 spdk_strcpy_pad(cdata->sn, spdk_nvmf_subsystem_get_sn(subsystem), sizeof(cdata->sn), ' '); 2100 cdata->kas = ctrlr->cdata.kas; 2101 2102 cdata->rab = 6; 2103 
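/* RAB is a power of two, so 6 advertises a recommended arbitration burst of 2^6 = 64 commands. The CMIC bits below advertise a subsystem that may expose multiple ports and multiple controllers. */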
cdata->cmic.multi_port = 1; 2104 cdata->cmic.multi_host = 1; 2105 if (subsystem->flags.ana_reporting) { 2106 /* Asymmetric Namespace Access Reporting is supported. */ 2107 cdata->cmic.ana_reporting = 1; 2108 } 2109 cdata->oaes.ns_attribute_notices = 1; 2110 if (subsystem->flags.ana_reporting) { 2111 cdata->oaes.ana_change_notices = 1; 2112 } 2113 cdata->ctratt.host_id_exhid_supported = 1; 2114 /* TODO: Concurrent execution of multiple abort commands. */ 2115 cdata->acl = 0; 2116 cdata->aerl = 0; 2117 cdata->frmw.slot1_ro = 1; 2118 cdata->frmw.num_slots = 1; 2119 2120 cdata->lpa.celp = 1; /* Command Effects log page supported */ 2121 2122 cdata->sqes.min = 6; 2123 cdata->sqes.max = 6; 2124 cdata->cqes.min = 4; 2125 cdata->cqes.max = 4; 2126 cdata->nn = subsystem->max_nsid; 2127 cdata->vwc.present = 1; 2128 cdata->vwc.flush_broadcast = SPDK_NVME_FLUSH_BROADCAST_NOT_SUPPORTED; 2129 2130 cdata->nvmf_specific = ctrlr->cdata.nvmf_specific; 2131 2132 cdata->oncs.dsm = nvmf_ctrlr_dsm_supported(ctrlr); 2133 cdata->oncs.write_zeroes = nvmf_ctrlr_write_zeroes_supported(ctrlr); 2134 cdata->oncs.reservations = 1; 2135 if (subsystem->flags.ana_reporting) { 2136 cdata->anatt = ANA_TRANSITION_TIME_IN_SEC; 2137 /* ANA Change state is not used, and ANA Persistent Loss state 2138 * is not supported for now. 2139 */ 2140 cdata->anacap.ana_optimized_state = 1; 2141 cdata->anacap.ana_non_optimized_state = 1; 2142 cdata->anacap.ana_inaccessible_state = 1; 2143 /* ANAGRPID does not change while namespace is attached to controller */ 2144 cdata->anacap.no_change_anagrpid = 1; 2145 cdata->anagrpmax = subsystem->max_nsid; 2146 cdata->nanagrpid = subsystem->max_nsid; 2147 } 2148 2149 nvmf_ctrlr_populate_oacs(ctrlr, cdata); 2150 2151 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: ioccsz 0x%x\n", 2152 cdata->nvmf_specific.ioccsz); 2153 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: iorcsz 0x%x\n", 2154 cdata->nvmf_specific.iorcsz); 2155 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: icdoff 0x%x\n", 2156 cdata->nvmf_specific.icdoff); 2157 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: ctrattr 0x%x\n", 2158 *(uint8_t *)&cdata->nvmf_specific.ctrattr); 2159 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: msdbd 0x%x\n", 2160 cdata->nvmf_specific.msdbd); 2161 } 2162 2163 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2164 } 2165 2166 static int 2167 nvmf_ctrlr_identify_active_ns_list(struct spdk_nvmf_subsystem *subsystem, 2168 struct spdk_nvme_cmd *cmd, 2169 struct spdk_nvme_cpl *rsp, 2170 struct spdk_nvme_ns_list *ns_list) 2171 { 2172 struct spdk_nvmf_ns *ns; 2173 uint32_t count = 0; 2174 2175 if (cmd->nsid >= 0xfffffffeUL) { 2176 SPDK_ERRLOG("Identify Active Namespace List with invalid NSID %u\n", cmd->nsid); 2177 rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT; 2178 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2179 } 2180 2181 for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL; 2182 ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) { 2183 if (ns->opts.nsid <= cmd->nsid) { 2184 continue; 2185 } 2186 2187 ns_list->ns_list[count++] = ns->opts.nsid; 2188 if (count == SPDK_COUNTOF(ns_list->ns_list)) { 2189 break; 2190 } 2191 } 2192 2193 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2194 } 2195 2196 static void 2197 _add_ns_id_desc(void **buf_ptr, size_t *buf_remain, 2198 enum spdk_nvme_nidt type, 2199 const void *data, size_t data_size) 2200 { 2201 struct spdk_nvme_ns_id_desc *desc; 2202 size_t desc_size = sizeof(*desc) + data_size; 2203 2204 /* 2205 * These should never fail in practice, since all valid NS ID descriptors 2206 * should be defined 
so that they fit in the available 4096-byte buffer. 2207 */ 2208 assert(data_size > 0); 2209 assert(data_size <= UINT8_MAX); 2210 assert(desc_size < *buf_remain); 2211 if (data_size == 0 || data_size > UINT8_MAX || desc_size > *buf_remain) { 2212 return; 2213 } 2214 2215 desc = *buf_ptr; 2216 desc->nidt = type; 2217 desc->nidl = data_size; 2218 memcpy(desc->nid, data, data_size); 2219 2220 *buf_ptr += desc_size; 2221 *buf_remain -= desc_size; 2222 } 2223 2224 static int 2225 nvmf_ctrlr_identify_ns_id_descriptor_list( 2226 struct spdk_nvmf_subsystem *subsystem, 2227 struct spdk_nvme_cmd *cmd, 2228 struct spdk_nvme_cpl *rsp, 2229 void *id_desc_list, size_t id_desc_list_size) 2230 { 2231 struct spdk_nvmf_ns *ns; 2232 size_t buf_remain = id_desc_list_size; 2233 void *buf_ptr = id_desc_list; 2234 2235 ns = _nvmf_subsystem_get_ns(subsystem, cmd->nsid); 2236 if (ns == NULL || ns->bdev == NULL) { 2237 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 2238 rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT; 2239 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2240 } 2241 2242 #define ADD_ID_DESC(type, data, size) \ 2243 do { \ 2244 if (!spdk_mem_all_zero(data, size)) { \ 2245 _add_ns_id_desc(&buf_ptr, &buf_remain, type, data, size); \ 2246 } \ 2247 } while (0) 2248 2249 ADD_ID_DESC(SPDK_NVME_NIDT_EUI64, ns->opts.eui64, sizeof(ns->opts.eui64)); 2250 ADD_ID_DESC(SPDK_NVME_NIDT_NGUID, ns->opts.nguid, sizeof(ns->opts.nguid)); 2251 ADD_ID_DESC(SPDK_NVME_NIDT_UUID, &ns->opts.uuid, sizeof(ns->opts.uuid)); 2252 2253 /* 2254 * The list is automatically 0-terminated because controller to host buffers in 2255 * admin commands always get zeroed in nvmf_ctrlr_process_admin_cmd(). 2256 */ 2257 2258 #undef ADD_ID_DESC 2259 2260 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2261 } 2262 2263 static int 2264 nvmf_ctrlr_identify(struct spdk_nvmf_request *req) 2265 { 2266 uint8_t cns; 2267 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2268 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2269 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 2270 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 2271 2272 if (req->data == NULL || req->length < 4096) { 2273 SPDK_ERRLOG("identify command with invalid buffer\n"); 2274 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 2275 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2276 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2277 } 2278 2279 cns = cmd->cdw10_bits.identify.cns; 2280 2281 if (subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY && 2282 cns != SPDK_NVME_IDENTIFY_CTRLR) { 2283 /* Discovery controllers only support Identify Controller */ 2284 goto invalid_cns; 2285 } 2286 2287 switch (cns) { 2288 case SPDK_NVME_IDENTIFY_NS: 2289 return spdk_nvmf_ctrlr_identify_ns(ctrlr, cmd, rsp, req->data); 2290 case SPDK_NVME_IDENTIFY_CTRLR: 2291 return spdk_nvmf_ctrlr_identify_ctrlr(ctrlr, req->data); 2292 case SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST: 2293 return nvmf_ctrlr_identify_active_ns_list(subsystem, cmd, rsp, req->data); 2294 case SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST: 2295 return nvmf_ctrlr_identify_ns_id_descriptor_list(subsystem, cmd, rsp, req->data, req->length); 2296 default: 2297 goto invalid_cns; 2298 } 2299 2300 invalid_cns: 2301 SPDK_ERRLOG("Identify command with unsupported CNS 0x%02x\n", cns); 2302 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 2303 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2304 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2305 } 2306 2307 static bool 2308 nvmf_qpair_abort_aer(struct spdk_nvmf_qpair *qpair, uint16_t cid) 2309 { 2310 struct 
spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; 2311 struct spdk_nvmf_request *req; 2312 int i; 2313 2314 if (!nvmf_qpair_is_admin_queue(qpair)) { 2315 return false; 2316 } 2317 2318 for (i = 0; i < ctrlr->nr_aer_reqs; i++) { 2319 if (ctrlr->aer_req[i]->cmd->nvme_cmd.cid == cid) { 2320 SPDK_DEBUGLOG(nvmf, "Aborting AER request\n"); 2321 req = ctrlr->aer_req[i]; 2322 ctrlr->aer_req[i] = NULL; 2323 ctrlr->nr_aer_reqs--; 2324 2325 /* Move the last req to the aborting position for making aer_reqs 2326 * in continuous 2327 */ 2328 if (i < ctrlr->nr_aer_reqs) { 2329 ctrlr->aer_req[i] = ctrlr->aer_req[ctrlr->nr_aer_reqs]; 2330 ctrlr->aer_req[ctrlr->nr_aer_reqs] = NULL; 2331 } 2332 2333 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2334 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 2335 _nvmf_request_complete(req); 2336 return true; 2337 } 2338 } 2339 2340 return false; 2341 } 2342 2343 static void 2344 nvmf_qpair_abort_request(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req) 2345 { 2346 uint16_t cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid; 2347 2348 if (nvmf_qpair_abort_aer(qpair, cid)) { 2349 SPDK_DEBUGLOG(nvmf, "abort ctrlr=%p sqid=%u cid=%u successful\n", 2350 qpair->ctrlr, qpair->qid, cid); 2351 req->rsp->nvme_cpl.cdw0 &= ~1U; /* Command successfully aborted */ 2352 2353 spdk_nvmf_request_complete(req); 2354 return; 2355 } 2356 2357 nvmf_transport_qpair_abort_request(qpair, req); 2358 } 2359 2360 static void 2361 nvmf_ctrlr_abort_done(struct spdk_io_channel_iter *i, int status) 2362 { 2363 struct spdk_nvmf_request *req = spdk_io_channel_iter_get_ctx(i); 2364 2365 if (status == 0) { 2366 /* There was no qpair whose ID matches SQID of the abort command. 2367 * Hence call _nvmf_request_complete() here. 2368 */ 2369 _nvmf_request_complete(req); 2370 } 2371 } 2372 2373 static void 2374 nvmf_ctrlr_abort_on_pg(struct spdk_io_channel_iter *i) 2375 { 2376 struct spdk_nvmf_request *req = spdk_io_channel_iter_get_ctx(i); 2377 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 2378 struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch); 2379 uint16_t sqid = req->cmd->nvme_cmd.cdw10_bits.abort.sqid; 2380 struct spdk_nvmf_qpair *qpair; 2381 2382 TAILQ_FOREACH(qpair, &group->qpairs, link) { 2383 if (qpair->ctrlr == req->qpair->ctrlr && qpair->qid == sqid) { 2384 /* Found the qpair */ 2385 2386 nvmf_qpair_abort_request(qpair, req); 2387 2388 /* Return -1 for the status so the iteration across threads stops. */ 2389 spdk_for_each_channel_continue(i, -1); 2390 return; 2391 } 2392 } 2393 2394 spdk_for_each_channel_continue(i, 0); 2395 } 2396 2397 static int 2398 nvmf_ctrlr_abort(struct spdk_nvmf_request *req) 2399 { 2400 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 2401 2402 rsp->cdw0 = 1U; /* Command not aborted */ 2403 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 2404 rsp->status.sc = SPDK_NVME_SC_SUCCESS; 2405 2406 /* Send a message to each poll group, searching for this ctrlr, sqid, and command. 
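nvmf_ctrlr_abort_on_pg() stops the iteration with a negative status as soon as it finds the matching qpair; if the walk completes with status 0, no queue matched the SQID and nvmf_ctrlr_abort_done() completes the request with CDW0 bit 0 still set (command not aborted).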
*/ 2407 spdk_for_each_channel(req->qpair->ctrlr->subsys->tgt, 2408 nvmf_ctrlr_abort_on_pg, 2409 req, 2410 nvmf_ctrlr_abort_done 2411 ); 2412 2413 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 2414 } 2415 2416 int 2417 nvmf_ctrlr_abort_request(struct spdk_nvmf_request *req) 2418 { 2419 struct spdk_nvmf_request *req_to_abort = req->req_to_abort; 2420 struct spdk_bdev *bdev; 2421 struct spdk_bdev_desc *desc; 2422 struct spdk_io_channel *ch; 2423 int rc; 2424 2425 assert(req_to_abort != NULL); 2426 2427 if (g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_ABORT].hdlr && 2428 nvmf_qpair_is_admin_queue(req_to_abort->qpair)) { 2429 return g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_ABORT].hdlr(req); 2430 } 2431 2432 rc = spdk_nvmf_request_get_bdev(req_to_abort->cmd->nvme_cmd.nsid, req_to_abort, 2433 &bdev, &desc, &ch); 2434 if (rc != 0) { 2435 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2436 } 2437 2438 return spdk_nvmf_bdev_ctrlr_abort_cmd(bdev, desc, ch, req, req_to_abort); 2439 } 2440 2441 static int 2442 get_features_generic(struct spdk_nvmf_request *req, uint32_t cdw0) 2443 { 2444 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 2445 2446 rsp->cdw0 = cdw0; 2447 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2448 } 2449 2450 /* we have to use the typedef in the function declaration to appease astyle. */ 2451 typedef enum spdk_nvme_path_status_code spdk_nvme_path_status_code_t; 2452 2453 static spdk_nvme_path_status_code_t 2454 _nvme_ana_state_to_path_status(enum spdk_nvme_ana_state ana_state) 2455 { 2456 switch (ana_state) { 2457 case SPDK_NVME_ANA_INACCESSIBLE_STATE: 2458 return SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE; 2459 case SPDK_NVME_ANA_PERSISTENT_LOSS_STATE: 2460 return SPDK_NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS; 2461 case SPDK_NVME_ANA_CHANGE_STATE: 2462 return SPDK_NVME_SC_ASYMMETRIC_ACCESS_TRANSITION; 2463 default: 2464 return SPDK_NVME_SC_INTERNAL_PATH_ERROR; 2465 } 2466 } 2467 2468 static int 2469 nvmf_ctrlr_get_features(struct spdk_nvmf_request *req) 2470 { 2471 uint8_t feature; 2472 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2473 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2474 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 2475 enum spdk_nvme_ana_state ana_state; 2476 2477 feature = cmd->cdw10_bits.get_features.fid; 2478 2479 if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) { 2480 /* 2481 * Features supported by Discovery controller 2482 */ 2483 switch (feature) { 2484 case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER: 2485 return get_features_generic(req, ctrlr->feat.keep_alive_timer.raw); 2486 case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION: 2487 return get_features_generic(req, ctrlr->feat.async_event_configuration.raw); 2488 default: 2489 SPDK_ERRLOG("Get Features command with unsupported feature ID 0x%02x\n", feature); 2490 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2491 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2492 } 2493 } 2494 /* 2495 * Process Get Features command for non-discovery controller 2496 */ 2497 ana_state = ctrlr->listener->ana_state; 2498 switch (ana_state) { 2499 case SPDK_NVME_ANA_INACCESSIBLE_STATE: 2500 case SPDK_NVME_ANA_PERSISTENT_LOSS_STATE: 2501 case SPDK_NVME_ANA_CHANGE_STATE: 2502 switch (feature) { 2503 case SPDK_NVME_FEAT_ERROR_RECOVERY: 2504 case SPDK_NVME_FEAT_WRITE_ATOMICITY: 2505 case SPDK_NVME_FEAT_HOST_RESERVE_MASK: 2506 case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST: 2507 response->status.sct = SPDK_NVME_SCT_PATH; 2508 response->status.sc = _nvme_ana_state_to_path_status(ana_state); 2509 return 
SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2510 default: 2511 break; 2512 } 2513 break; 2514 default: 2515 break; 2516 } 2517 2518 switch (feature) { 2519 case SPDK_NVME_FEAT_ARBITRATION: 2520 return get_features_generic(req, ctrlr->feat.arbitration.raw); 2521 case SPDK_NVME_FEAT_POWER_MANAGEMENT: 2522 return get_features_generic(req, ctrlr->feat.power_management.raw); 2523 case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD: 2524 return nvmf_ctrlr_get_features_temperature_threshold(req); 2525 case SPDK_NVME_FEAT_ERROR_RECOVERY: 2526 return get_features_generic(req, ctrlr->feat.error_recovery.raw); 2527 case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE: 2528 return get_features_generic(req, ctrlr->feat.volatile_write_cache.raw); 2529 case SPDK_NVME_FEAT_NUMBER_OF_QUEUES: 2530 return get_features_generic(req, ctrlr->feat.number_of_queues.raw); 2531 case SPDK_NVME_FEAT_WRITE_ATOMICITY: 2532 return get_features_generic(req, ctrlr->feat.write_atomicity.raw); 2533 case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION: 2534 return get_features_generic(req, ctrlr->feat.async_event_configuration.raw); 2535 case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER: 2536 return get_features_generic(req, ctrlr->feat.keep_alive_timer.raw); 2537 case SPDK_NVME_FEAT_HOST_IDENTIFIER: 2538 return nvmf_ctrlr_get_features_host_identifier(req); 2539 case SPDK_NVME_FEAT_HOST_RESERVE_MASK: 2540 return nvmf_ctrlr_get_features_reservation_notification_mask(req); 2541 case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST: 2542 return nvmf_ctrlr_get_features_reservation_persistence(req); 2543 default: 2544 SPDK_ERRLOG("Get Features command with unsupported feature ID 0x%02x\n", feature); 2545 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2546 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2547 } 2548 } 2549 2550 static int 2551 nvmf_ctrlr_set_features(struct spdk_nvmf_request *req) 2552 { 2553 uint8_t feature, save; 2554 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2555 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2556 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 2557 enum spdk_nvme_ana_state ana_state; 2558 /* 2559 * Features are not saveable by the controller as indicated by 2560 * ONCS field of the Identify Controller data. 
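 * A Set Features command that arrives with the Save (SV) bit set is therefore failed with Feature Identifier Not Saveable.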
2561 * */ 2562 save = cmd->cdw10_bits.set_features.sv; 2563 if (save) { 2564 response->status.sc = SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE; 2565 response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 2566 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2567 } 2568 2569 feature = cmd->cdw10_bits.set_features.fid; 2570 2571 if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) { 2572 /* 2573 * Features supported by Discovery controller 2574 */ 2575 switch (feature) { 2576 case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER: 2577 return nvmf_ctrlr_set_features_keep_alive_timer(req); 2578 case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION: 2579 return nvmf_ctrlr_set_features_async_event_configuration(req); 2580 default: 2581 SPDK_ERRLOG("Set Features command with unsupported feature ID 0x%02x\n", feature); 2582 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2583 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2584 } 2585 } 2586 /* 2587 * Process Set Features command for non-discovery controller 2588 */ 2589 ana_state = ctrlr->listener->ana_state; 2590 switch (ana_state) { 2591 case SPDK_NVME_ANA_INACCESSIBLE_STATE: 2592 case SPDK_NVME_ANA_CHANGE_STATE: 2593 if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) { 2594 response->status.sct = SPDK_NVME_SCT_PATH; 2595 response->status.sc = _nvme_ana_state_to_path_status(ana_state); 2596 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2597 } else { 2598 switch (feature) { 2599 case SPDK_NVME_FEAT_ERROR_RECOVERY: 2600 case SPDK_NVME_FEAT_WRITE_ATOMICITY: 2601 case SPDK_NVME_FEAT_HOST_RESERVE_MASK: 2602 case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST: 2603 response->status.sct = SPDK_NVME_SCT_PATH; 2604 response->status.sc = _nvme_ana_state_to_path_status(ana_state); 2605 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2606 default: 2607 break; 2608 } 2609 } 2610 break; 2611 case SPDK_NVME_ANA_PERSISTENT_LOSS_STATE: 2612 response->status.sct = SPDK_NVME_SCT_PATH; 2613 response->status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS; 2614 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2615 default: 2616 break; 2617 } 2618 2619 switch (feature) { 2620 case SPDK_NVME_FEAT_ARBITRATION: 2621 return nvmf_ctrlr_set_features_arbitration(req); 2622 case SPDK_NVME_FEAT_POWER_MANAGEMENT: 2623 return nvmf_ctrlr_set_features_power_management(req); 2624 case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD: 2625 return nvmf_ctrlr_set_features_temperature_threshold(req); 2626 case SPDK_NVME_FEAT_ERROR_RECOVERY: 2627 return nvmf_ctrlr_set_features_error_recovery(req); 2628 case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE: 2629 return nvmf_ctrlr_set_features_volatile_write_cache(req); 2630 case SPDK_NVME_FEAT_NUMBER_OF_QUEUES: 2631 return nvmf_ctrlr_set_features_number_of_queues(req); 2632 case SPDK_NVME_FEAT_WRITE_ATOMICITY: 2633 return nvmf_ctrlr_set_features_write_atomicity(req); 2634 case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION: 2635 return nvmf_ctrlr_set_features_async_event_configuration(req); 2636 case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER: 2637 return nvmf_ctrlr_set_features_keep_alive_timer(req); 2638 case SPDK_NVME_FEAT_HOST_IDENTIFIER: 2639 return nvmf_ctrlr_set_features_host_identifier(req); 2640 case SPDK_NVME_FEAT_HOST_RESERVE_MASK: 2641 return nvmf_ctrlr_set_features_reservation_notification_mask(req); 2642 case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST: 2643 return nvmf_ctrlr_set_features_reservation_persistence(req); 2644 default: 2645 SPDK_ERRLOG("Set Features command with unsupported feature ID 0x%02x\n", feature); 2646 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2647 return 
SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2648 } 2649 } 2650 2651 static int 2652 nvmf_ctrlr_keep_alive(struct spdk_nvmf_request *req) 2653 { 2654 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2655 2656 SPDK_DEBUGLOG(nvmf, "Keep Alive\n"); 2657 /* 2658 * To handle keep alive just clear or reset the 2659 * ctrlr based keep alive duration counter. 2660 * When added, a separate timer based process 2661 * will monitor if the time since last recorded 2662 * keep alive has exceeded the max duration and 2663 * take appropriate action. 2664 */ 2665 ctrlr->last_keep_alive_tick = spdk_get_ticks(); 2666 2667 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2668 } 2669 2670 int 2671 nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req) 2672 { 2673 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2674 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2675 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 2676 int rc; 2677 2678 if (ctrlr == NULL) { 2679 SPDK_ERRLOG("Admin command sent before CONNECT\n"); 2680 response->status.sct = SPDK_NVME_SCT_GENERIC; 2681 response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR; 2682 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2683 } 2684 2685 if (ctrlr->vcprop.cc.bits.en != 1) { 2686 SPDK_ERRLOG("Admin command sent to disabled controller\n"); 2687 response->status.sct = SPDK_NVME_SCT_GENERIC; 2688 response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR; 2689 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2690 } 2691 2692 if (req->data && spdk_nvme_opc_get_data_transfer(cmd->opc) == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { 2693 memset(req->data, 0, req->length); 2694 } 2695 2696 if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) { 2697 /* Discovery controllers only support these admin OPS. */ 2698 switch (cmd->opc) { 2699 case SPDK_NVME_OPC_IDENTIFY: 2700 case SPDK_NVME_OPC_GET_LOG_PAGE: 2701 case SPDK_NVME_OPC_KEEP_ALIVE: 2702 case SPDK_NVME_OPC_SET_FEATURES: 2703 case SPDK_NVME_OPC_GET_FEATURES: 2704 case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST: 2705 break; 2706 default: 2707 goto invalid_opcode; 2708 } 2709 } 2710 2711 /* Call a custom adm cmd handler if set. 
Aborts are handled in a different path (see nvmf_passthru_admin_cmd) */ 2712 if (g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].hdlr && cmd->opc != SPDK_NVME_OPC_ABORT) { 2713 rc = g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].hdlr(req); 2714 if (rc >= SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) { 2715 /* The handler took care of this command */ 2716 return rc; 2717 } 2718 } 2719 2720 switch (cmd->opc) { 2721 case SPDK_NVME_OPC_GET_LOG_PAGE: 2722 return nvmf_ctrlr_get_log_page(req); 2723 case SPDK_NVME_OPC_IDENTIFY: 2724 return nvmf_ctrlr_identify(req); 2725 case SPDK_NVME_OPC_ABORT: 2726 return nvmf_ctrlr_abort(req); 2727 case SPDK_NVME_OPC_GET_FEATURES: 2728 return nvmf_ctrlr_get_features(req); 2729 case SPDK_NVME_OPC_SET_FEATURES: 2730 return nvmf_ctrlr_set_features(req); 2731 case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST: 2732 return nvmf_ctrlr_async_event_request(req); 2733 case SPDK_NVME_OPC_KEEP_ALIVE: 2734 return nvmf_ctrlr_keep_alive(req); 2735 2736 case SPDK_NVME_OPC_CREATE_IO_SQ: 2737 case SPDK_NVME_OPC_CREATE_IO_CQ: 2738 case SPDK_NVME_OPC_DELETE_IO_SQ: 2739 case SPDK_NVME_OPC_DELETE_IO_CQ: 2740 /* Create and Delete I/O CQ/SQ not allowed in NVMe-oF */ 2741 goto invalid_opcode; 2742 2743 default: 2744 goto invalid_opcode; 2745 } 2746 2747 invalid_opcode: 2748 SPDK_ERRLOG("Unsupported admin opcode 0x%x\n", cmd->opc); 2749 response->status.sct = SPDK_NVME_SCT_GENERIC; 2750 response->status.sc = SPDK_NVME_SC_INVALID_OPCODE; 2751 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2752 } 2753 2754 static int 2755 nvmf_ctrlr_process_fabrics_cmd(struct spdk_nvmf_request *req) 2756 { 2757 struct spdk_nvmf_qpair *qpair = req->qpair; 2758 struct spdk_nvmf_capsule_cmd *cap_hdr; 2759 2760 cap_hdr = &req->cmd->nvmf_cmd; 2761 2762 if (qpair->ctrlr == NULL) { 2763 /* No ctrlr established yet; the only valid command is Connect */ 2764 if (cap_hdr->fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT) { 2765 return nvmf_ctrlr_cmd_connect(req); 2766 } else { 2767 SPDK_DEBUGLOG(nvmf, "Got fctype 0x%x, expected Connect\n", 2768 cap_hdr->fctype); 2769 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2770 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR; 2771 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2772 } 2773 } else if (nvmf_qpair_is_admin_queue(qpair)) { 2774 /* 2775 * Controller session is established, and this is an admin queue. 2776 * Disallow Connect and allow other fabrics commands.
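 * Only Property Set and Property Get are handled here; any other fctype, including a second Connect, falls through to the default case and fails with Invalid Opcode.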
2777 */ 2778 switch (cap_hdr->fctype) { 2779 case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET: 2780 return nvmf_property_set(req); 2781 case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET: 2782 return nvmf_property_get(req); 2783 default: 2784 SPDK_DEBUGLOG(nvmf, "unknown fctype 0x%02x\n", 2785 cap_hdr->fctype); 2786 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2787 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE; 2788 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2789 } 2790 } else { 2791 /* Controller session is established, and this is an I/O queue */ 2792 /* For now, no I/O-specific Fabrics commands are implemented (other than Connect) */ 2793 SPDK_DEBUGLOG(nvmf, "Unexpected I/O fctype 0x%x\n", cap_hdr->fctype); 2794 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2795 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE; 2796 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2797 } 2798 } 2799 2800 static inline int 2801 nvmf_ctrlr_async_event_notification(struct spdk_nvmf_ctrlr *ctrlr, 2802 union spdk_nvme_async_event_completion *event) 2803 { 2804 struct spdk_nvmf_request *req; 2805 struct spdk_nvme_cpl *rsp; 2806 2807 assert(ctrlr->nr_aer_reqs > 0); 2808 2809 req = ctrlr->aer_req[--ctrlr->nr_aer_reqs]; 2810 rsp = &req->rsp->nvme_cpl; 2811 2812 rsp->cdw0 = event->raw; 2813 2814 _nvmf_request_complete(req); 2815 ctrlr->aer_req[ctrlr->nr_aer_reqs] = NULL; 2816 2817 return 0; 2818 } 2819 2820 int 2821 nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr) 2822 { 2823 union spdk_nvme_async_event_completion event = {0}; 2824 2825 /* Users may disable the event notification */ 2826 if (!ctrlr->feat.async_event_configuration.bits.ns_attr_notice) { 2827 return 0; 2828 } 2829 2830 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2831 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 2832 event.bits.log_page_identifier = SPDK_NVME_LOG_CHANGED_NS_LIST; 2833 2834 /* If there is no outstanding AER request, queue the event. Then 2835 * if an AER is later submitted, this event can be sent as a 2836 * response. 2837 */ 2838 if (ctrlr->nr_aer_reqs == 0) { 2839 if (ctrlr->notice_event.bits.async_event_type == 2840 SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) { 2841 return 0; 2842 } 2843 2844 ctrlr->notice_event.raw = event.raw; 2845 return 0; 2846 } 2847 2848 return nvmf_ctrlr_async_event_notification(ctrlr, &event); 2849 } 2850 2851 int 2852 nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr) 2853 { 2854 union spdk_nvme_async_event_completion event = {0}; 2855 2856 /* Users may disable the event notification */ 2857 if (!ctrlr->feat.async_event_configuration.bits.ana_change_notice) { 2858 return 0; 2859 } 2860 2861 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2862 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 2863 event.bits.log_page_identifier = SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS; 2864 2865 /* If there is no outstanding AER request, queue the event. Then 2866 * if an AER is later submitted, this event can be sent as a 2867 * response. 
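 * Only one Notice-type event is latched in ctrlr->notice_event, so if a Notice event is already pending the new one is dropped rather than queued behind it.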
2868 */ 2869 if (ctrlr->nr_aer_reqs == 0) { 2870 if (ctrlr->notice_event.bits.async_event_type == 2871 SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) { 2872 return 0; 2873 } 2874 2875 ctrlr->notice_event.raw = event.raw; 2876 return 0; 2877 } 2878 2879 return nvmf_ctrlr_async_event_notification(ctrlr, &event); 2880 } 2881 2882 void 2883 nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr) 2884 { 2885 union spdk_nvme_async_event_completion event = {0}; 2886 2887 if (!ctrlr->num_avail_log_pages) { 2888 return; 2889 } 2890 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_IO; 2891 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL; 2892 event.bits.log_page_identifier = SPDK_NVME_LOG_RESERVATION_NOTIFICATION; 2893 2894 /* If there is no outstanding AER request, queue the event. Then 2895 * if an AER is later submitted, this event can be sent as a 2896 * response. 2897 */ 2898 if (ctrlr->nr_aer_reqs == 0) { 2899 if (ctrlr->reservation_event.bits.async_event_type == 2900 SPDK_NVME_ASYNC_EVENT_TYPE_IO) { 2901 return; 2902 } 2903 2904 ctrlr->reservation_event.raw = event.raw; 2905 return; 2906 } 2907 2908 nvmf_ctrlr_async_event_notification(ctrlr, &event); 2909 } 2910 2911 int 2912 nvmf_ctrlr_async_event_discovery_log_change_notice(struct spdk_nvmf_ctrlr *ctrlr) 2913 { 2914 union spdk_nvme_async_event_completion event = {0}; 2915 2916 /* Users may disable the event notification manually or 2917 * it may not be enabled due to keep alive timeout 2918 * not being set in connect command to discovery controller. 2919 */ 2920 if (!ctrlr->feat.async_event_configuration.bits.discovery_log_change_notice) { 2921 return 0; 2922 } 2923 2924 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2925 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE; 2926 event.bits.log_page_identifier = SPDK_NVME_LOG_DISCOVERY; 2927 2928 /* If there is no outstanding AER request, queue the event. Then 2929 * if an AER is later submitted, this event can be sent as a 2930 * response. 
2931 */ 2932 if (ctrlr->nr_aer_reqs == 0) { 2933 if (ctrlr->notice_event.bits.async_event_type == 2934 SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) { 2935 return 0; 2936 } 2937 2938 ctrlr->notice_event.raw = event.raw; 2939 return 0; 2940 } 2941 2942 return nvmf_ctrlr_async_event_notification(ctrlr, &event); 2943 } 2944 2945 void 2946 nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair) 2947 { 2948 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; 2949 int i; 2950 2951 if (!nvmf_qpair_is_admin_queue(qpair)) { 2952 return; 2953 } 2954 2955 for (i = 0; i < ctrlr->nr_aer_reqs; i++) { 2956 spdk_nvmf_request_free(ctrlr->aer_req[i]); 2957 ctrlr->aer_req[i] = NULL; 2958 } 2959 2960 ctrlr->nr_aer_reqs = 0; 2961 } 2962 2963 void 2964 nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr) 2965 { 2966 struct spdk_nvmf_request *req; 2967 int i; 2968 2969 for (i = 0; i < ctrlr->nr_aer_reqs; i++) { 2970 req = ctrlr->aer_req[i]; 2971 2972 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2973 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 2974 _nvmf_request_complete(req); 2975 2976 ctrlr->aer_req[i] = NULL; 2977 } 2978 2979 ctrlr->nr_aer_reqs = 0; 2980 } 2981 2982 static void 2983 _nvmf_ctrlr_add_reservation_log(void *ctx) 2984 { 2985 struct spdk_nvmf_reservation_log *log = (struct spdk_nvmf_reservation_log *)ctx; 2986 struct spdk_nvmf_ctrlr *ctrlr = log->ctrlr; 2987 2988 ctrlr->log_page_count++; 2989 2990 /* Maximum number of queued log pages is 255 */ 2991 if (ctrlr->num_avail_log_pages == 0xff) { 2992 struct spdk_nvmf_reservation_log *entry; 2993 entry = TAILQ_LAST(&ctrlr->log_head, log_page_head); 2994 entry->log.log_page_count = ctrlr->log_page_count; 2995 free(log); 2996 return; 2997 } 2998 2999 log->log.log_page_count = ctrlr->log_page_count; 3000 log->log.num_avail_log_pages = ctrlr->num_avail_log_pages++; 3001 TAILQ_INSERT_TAIL(&ctrlr->log_head, log, link); 3002 3003 nvmf_ctrlr_async_event_reservation_notification(ctrlr); 3004 } 3005 3006 void 3007 nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr, 3008 struct spdk_nvmf_ns *ns, 3009 enum spdk_nvme_reservation_notification_log_page_type type) 3010 { 3011 struct spdk_nvmf_reservation_log *log; 3012 3013 switch (type) { 3014 case SPDK_NVME_RESERVATION_LOG_PAGE_EMPTY: 3015 return; 3016 case SPDK_NVME_REGISTRATION_PREEMPTED: 3017 if (ns->mask & SPDK_NVME_REGISTRATION_PREEMPTED_MASK) { 3018 return; 3019 } 3020 break; 3021 case SPDK_NVME_RESERVATION_RELEASED: 3022 if (ns->mask & SPDK_NVME_RESERVATION_RELEASED_MASK) { 3023 return; 3024 } 3025 break; 3026 case SPDK_NVME_RESERVATION_PREEMPTED: 3027 if (ns->mask & SPDK_NVME_RESERVATION_PREEMPTED_MASK) { 3028 return; 3029 } 3030 break; 3031 default: 3032 return; 3033 } 3034 3035 log = calloc(1, sizeof(*log)); 3036 if (!log) { 3037 SPDK_ERRLOG("Alloc log page failed, ignore the log\n"); 3038 return; 3039 } 3040 log->ctrlr = ctrlr; 3041 log->log.type = type; 3042 log->log.nsid = ns->nsid; 3043 3044 spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_add_reservation_log, log); 3045 } 3046 3047 /* Check from subsystem poll group's namespace information data structure */ 3048 static bool 3049 nvmf_ns_info_ctrlr_is_registrant(struct spdk_nvmf_subsystem_pg_ns_info *ns_info, 3050 struct spdk_nvmf_ctrlr *ctrlr) 3051 { 3052 uint32_t i; 3053 3054 for (i = 0; i < SPDK_NVMF_MAX_NUM_REGISTRANTS; i++) { 3055 if (!spdk_uuid_compare(&ns_info->reg_hostid[i], &ctrlr->hostid)) { 3056 return true; 3057 } 3058 } 3059 3060 return false; 3061 } 3062 3063 /* 3064 * Check the NVMe command is permitted or not for current 
controller(Host). 3065 */ 3066 static int 3067 nvmf_ns_reservation_request_check(struct spdk_nvmf_subsystem_pg_ns_info *ns_info, 3068 struct spdk_nvmf_ctrlr *ctrlr, 3069 struct spdk_nvmf_request *req) 3070 { 3071 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 3072 enum spdk_nvme_reservation_type rtype = ns_info->rtype; 3073 uint8_t status = SPDK_NVME_SC_SUCCESS; 3074 uint8_t racqa; 3075 bool is_registrant; 3076 3077 /* No valid reservation */ 3078 if (!rtype) { 3079 return 0; 3080 } 3081 3082 is_registrant = nvmf_ns_info_ctrlr_is_registrant(ns_info, ctrlr); 3083 /* All registrants type and current ctrlr is a valid registrant */ 3084 if ((rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS || 3085 rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) && is_registrant) { 3086 return 0; 3087 } else if (!spdk_uuid_compare(&ns_info->holder_id, &ctrlr->hostid)) { 3088 return 0; 3089 } 3090 3091 /* Non-holder for current controller */ 3092 switch (cmd->opc) { 3093 case SPDK_NVME_OPC_READ: 3094 case SPDK_NVME_OPC_COMPARE: 3095 if (rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) { 3096 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3097 goto exit; 3098 } 3099 if ((rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY || 3100 rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) && !is_registrant) { 3101 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3102 } 3103 break; 3104 case SPDK_NVME_OPC_FLUSH: 3105 case SPDK_NVME_OPC_WRITE: 3106 case SPDK_NVME_OPC_WRITE_UNCORRECTABLE: 3107 case SPDK_NVME_OPC_WRITE_ZEROES: 3108 case SPDK_NVME_OPC_DATASET_MANAGEMENT: 3109 if (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE || 3110 rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) { 3111 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3112 goto exit; 3113 } 3114 if (!is_registrant) { 3115 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3116 } 3117 break; 3118 case SPDK_NVME_OPC_RESERVATION_ACQUIRE: 3119 racqa = cmd->cdw10_bits.resv_acquire.racqa; 3120 if (racqa == SPDK_NVME_RESERVE_ACQUIRE) { 3121 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3122 goto exit; 3123 } 3124 if (!is_registrant) { 3125 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3126 } 3127 break; 3128 case SPDK_NVME_OPC_RESERVATION_RELEASE: 3129 if (!is_registrant) { 3130 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3131 } 3132 break; 3133 default: 3134 break; 3135 } 3136 3137 exit: 3138 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3139 req->rsp->nvme_cpl.status.sc = status; 3140 if (status == SPDK_NVME_SC_RESERVATION_CONFLICT) { 3141 return -EPERM; 3142 } 3143 3144 return 0; 3145 } 3146 3147 static int 3148 nvmf_ctrlr_process_io_fused_cmd(struct spdk_nvmf_request *req, struct spdk_bdev *bdev, 3149 struct spdk_bdev_desc *desc, struct spdk_io_channel *ch) 3150 { 3151 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 3152 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 3153 struct spdk_nvmf_request *first_fused_req = req->qpair->first_fused_req; 3154 int rc; 3155 3156 if (cmd->fuse == SPDK_NVME_CMD_FUSE_FIRST) { 3157 /* first fused operation (should be compare) */ 3158 if (first_fused_req != NULL) { 3159 struct spdk_nvme_cpl *fused_response = &first_fused_req->rsp->nvme_cpl; 3160 3161 SPDK_ERRLOG("Wrong sequence of fused operations\n"); 3162 3163 /* abort req->qpair->first_fused_request and continue with new fused command */ 3164 fused_response->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED; 3165 fused_response->status.sct = SPDK_NVME_SCT_GENERIC; 3166 _nvmf_request_complete(first_fused_req); 3167 } else if (cmd->opc != SPDK_NVME_OPC_COMPARE) { 3168 SPDK_ERRLOG("Wrong op 
code of fused operations\n"); 3169 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3170 rsp->status.sc = SPDK_NVME_SC_INVALID_OPCODE; 3171 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3172 } 3173 3174 req->qpair->first_fused_req = req; 3175 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3176 } else if (cmd->fuse == SPDK_NVME_CMD_FUSE_SECOND) { 3177 /* second fused operation (should be write) */ 3178 if (first_fused_req == NULL) { 3179 SPDK_ERRLOG("Wrong sequence of fused operations\n"); 3180 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3181 rsp->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED; 3182 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3183 } else if (cmd->opc != SPDK_NVME_OPC_WRITE) { 3184 struct spdk_nvme_cpl *fused_response = &first_fused_req->rsp->nvme_cpl; 3185 3186 SPDK_ERRLOG("Wrong op code of fused operations\n"); 3187 3188 /* abort req->qpair->first_fused_request and fail current command */ 3189 fused_response->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED; 3190 fused_response->status.sct = SPDK_NVME_SCT_GENERIC; 3191 _nvmf_request_complete(first_fused_req); 3192 3193 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3194 rsp->status.sc = SPDK_NVME_SC_INVALID_OPCODE; 3195 req->qpair->first_fused_req = NULL; 3196 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3197 } 3198 3199 /* save request of first command to generate response later */ 3200 req->first_fused_req = first_fused_req; 3201 req->qpair->first_fused_req = NULL; 3202 } else { 3203 SPDK_ERRLOG("Invalid fused command fuse field.\n"); 3204 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3205 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 3206 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3207 } 3208 3209 rc = nvmf_bdev_ctrlr_compare_and_write_cmd(bdev, desc, ch, req->first_fused_req, req); 3210 3211 if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) { 3212 if (spdk_nvme_cpl_is_error(rsp)) { 3213 struct spdk_nvme_cpl *fused_response = &first_fused_req->rsp->nvme_cpl; 3214 3215 fused_response->status = rsp->status; 3216 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3217 rsp->status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED; 3218 /* Complete first of fused commands. 
Second will be completed by upper layer */ 3219 _nvmf_request_complete(first_fused_req); 3220 req->first_fused_req = NULL; 3221 } 3222 } 3223 3224 return rc; 3225 } 3226 3227 int 3228 nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req) 3229 { 3230 uint32_t nsid; 3231 struct spdk_nvmf_ns *ns; 3232 struct spdk_bdev *bdev; 3233 struct spdk_bdev_desc *desc; 3234 struct spdk_io_channel *ch; 3235 struct spdk_nvmf_poll_group *group = req->qpair->group; 3236 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 3237 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 3238 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 3239 struct spdk_nvmf_subsystem_pg_ns_info *ns_info; 3240 enum spdk_nvme_ana_state ana_state; 3241 3242 /* pre-set response details for this command */ 3243 response->status.sc = SPDK_NVME_SC_SUCCESS; 3244 nsid = cmd->nsid; 3245 3246 if (spdk_unlikely(ctrlr == NULL)) { 3247 SPDK_ERRLOG("I/O command sent before CONNECT\n"); 3248 response->status.sct = SPDK_NVME_SCT_GENERIC; 3249 response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR; 3250 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3251 } 3252 3253 if (spdk_unlikely(ctrlr->vcprop.cc.bits.en != 1)) { 3254 SPDK_ERRLOG("I/O command sent to disabled controller\n"); 3255 response->status.sct = SPDK_NVME_SCT_GENERIC; 3256 response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR; 3257 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3258 } 3259 3260 /* It will be lower overhead to check if ANA state is optimized or 3261 * non-optimized. 3262 */ 3263 ana_state = ctrlr->listener->ana_state; 3264 if (spdk_unlikely(ana_state != SPDK_NVME_ANA_OPTIMIZED_STATE && 3265 ana_state != SPDK_NVME_ANA_NON_OPTIMIZED_STATE)) { 3266 SPDK_DEBUGLOG(nvmf, "Fail I/O command due to ANA state %d\n", 3267 ana_state); 3268 response->status.sct = SPDK_NVME_SCT_PATH; 3269 response->status.sc = _nvme_ana_state_to_path_status(ana_state); 3270 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3271 } 3272 3273 ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid); 3274 if (ns == NULL || ns->bdev == NULL) { 3275 SPDK_ERRLOG("Unsuccessful query for nsid %u\n", cmd->nsid); 3276 response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT; 3277 response->status.dnr = 1; 3278 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3279 } 3280 3281 /* scan-build falsely reporting dereference of null pointer */ 3282 assert(group != NULL && group->sgroups != NULL); 3283 ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[nsid - 1]; 3284 if (nvmf_ns_reservation_request_check(ns_info, ctrlr, req)) { 3285 SPDK_DEBUGLOG(nvmf, "Reservation Conflict for nsid %u, opcode %u\n", 3286 cmd->nsid, cmd->opc); 3287 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3288 } 3289 3290 bdev = ns->bdev; 3291 desc = ns->desc; 3292 ch = ns_info->channel; 3293 3294 if (spdk_unlikely(cmd->fuse & SPDK_NVME_CMD_FUSE_MASK)) { 3295 return nvmf_ctrlr_process_io_fused_cmd(req, bdev, desc, ch); 3296 } else if (spdk_unlikely(req->qpair->first_fused_req != NULL)) { 3297 struct spdk_nvme_cpl *fused_response = &req->qpair->first_fused_req->rsp->nvme_cpl; 3298 3299 SPDK_ERRLOG("Expected second of fused commands - failing first of fused commands\n"); 3300 3301 /* abort req->qpair->first_fused_request and continue with new command */ 3302 fused_response->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED; 3303 fused_response->status.sct = SPDK_NVME_SCT_GENERIC; 3304 _nvmf_request_complete(req->qpair->first_fused_req); 3305 req->qpair->first_fused_req = NULL; 3306 } 3307 3308 switch (cmd->opc) { 3309 case SPDK_NVME_OPC_READ: 
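/* Reads and the rest of the I/O data path below go straight to the bdev layer helpers; reservation commands are forwarded to the subsystem thread, and any opcode without a dedicated handler is sent through NVMe passthrough. */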
3310 return nvmf_bdev_ctrlr_read_cmd(bdev, desc, ch, req); 3311 case SPDK_NVME_OPC_WRITE: 3312 return nvmf_bdev_ctrlr_write_cmd(bdev, desc, ch, req); 3313 case SPDK_NVME_OPC_COMPARE: 3314 return nvmf_bdev_ctrlr_compare_cmd(bdev, desc, ch, req); 3315 case SPDK_NVME_OPC_WRITE_ZEROES: 3316 return nvmf_bdev_ctrlr_write_zeroes_cmd(bdev, desc, ch, req); 3317 case SPDK_NVME_OPC_FLUSH: 3318 return nvmf_bdev_ctrlr_flush_cmd(bdev, desc, ch, req); 3319 case SPDK_NVME_OPC_DATASET_MANAGEMENT: 3320 return nvmf_bdev_ctrlr_dsm_cmd(bdev, desc, ch, req); 3321 case SPDK_NVME_OPC_RESERVATION_REGISTER: 3322 case SPDK_NVME_OPC_RESERVATION_ACQUIRE: 3323 case SPDK_NVME_OPC_RESERVATION_RELEASE: 3324 case SPDK_NVME_OPC_RESERVATION_REPORT: 3325 spdk_thread_send_msg(ctrlr->subsys->thread, nvmf_ns_reservation_request, req); 3326 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3327 default: 3328 return nvmf_bdev_ctrlr_nvme_passthru_io(bdev, desc, ch, req); 3329 } 3330 } 3331 3332 static void 3333 nvmf_qpair_request_cleanup(struct spdk_nvmf_qpair *qpair) 3334 { 3335 if (qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING) { 3336 assert(qpair->state_cb != NULL); 3337 3338 if (TAILQ_EMPTY(&qpair->outstanding)) { 3339 qpair->state_cb(qpair->state_cb_arg, 0); 3340 } 3341 } 3342 } 3343 3344 int 3345 spdk_nvmf_request_free(struct spdk_nvmf_request *req) 3346 { 3347 struct spdk_nvmf_qpair *qpair = req->qpair; 3348 3349 TAILQ_REMOVE(&qpair->outstanding, req, link); 3350 if (nvmf_transport_req_free(req)) { 3351 SPDK_ERRLOG("Unable to free transport level request resources.\n"); 3352 } 3353 3354 nvmf_qpair_request_cleanup(qpair); 3355 3356 return 0; 3357 } 3358 3359 static void 3360 _nvmf_request_complete(void *ctx) 3361 { 3362 struct spdk_nvmf_request *req = ctx; 3363 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 3364 struct spdk_nvmf_qpair *qpair; 3365 struct spdk_nvmf_subsystem_poll_group *sgroup = NULL; 3366 bool is_aer = false; 3367 3368 rsp->sqid = 0; 3369 rsp->status.p = 0; 3370 rsp->cid = req->cmd->nvme_cmd.cid; 3371 3372 qpair = req->qpair; 3373 if (qpair->ctrlr) { 3374 sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id]; 3375 assert(sgroup != NULL); 3376 is_aer = req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 3377 } else if (spdk_unlikely(nvmf_request_is_fabric_connect(req))) { 3378 sgroup = nvmf_subsystem_pg_from_connect_cmd(req); 3379 } 3380 3381 if (SPDK_DEBUGLOG_FLAG_ENABLED("nvmf")) { 3382 spdk_nvme_print_completion(qpair->qid, rsp); 3383 } 3384 3385 TAILQ_REMOVE(&qpair->outstanding, req, link); 3386 if (nvmf_transport_req_complete(req)) { 3387 SPDK_ERRLOG("Transport request completion error!\n"); 3388 } 3389 3390 /* AER cmd is an exception */ 3391 if (sgroup && !is_aer) { 3392 assert(sgroup->io_outstanding > 0); 3393 sgroup->io_outstanding--; 3394 if (sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSING && 3395 sgroup->io_outstanding == 0) { 3396 sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED; 3397 sgroup->cb_fn(sgroup->cb_arg, 0); 3398 } 3399 } 3400 3401 nvmf_qpair_request_cleanup(qpair); 3402 } 3403 3404 int 3405 spdk_nvmf_request_complete(struct spdk_nvmf_request *req) 3406 { 3407 struct spdk_nvmf_qpair *qpair = req->qpair; 3408 3409 if (spdk_likely(qpair->group->thread == spdk_get_thread())) { 3410 _nvmf_request_complete(req); 3411 } else { 3412 spdk_thread_send_msg(qpair->group->thread, 3413 _nvmf_request_complete, req); 3414 } 3415 3416 return 0; 3417 } 3418 3419 static void 3420 _nvmf_request_exec(struct spdk_nvmf_request *req, 3421 struct spdk_nvmf_subsystem_poll_group *sgroup) 3422 { 3423 struct 
static void
_nvmf_request_exec(struct spdk_nvmf_request *req,
		   struct spdk_nvmf_subsystem_poll_group *sgroup)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;
	enum spdk_nvmf_request_exec_status status;

	if (SPDK_DEBUGLOG_FLAG_ENABLED("nvmf")) {
		spdk_nvme_print_command(qpair->qid, &req->cmd->nvme_cmd);
	}

	if (sgroup) {
		sgroup->io_outstanding++;
	}

	/* Place the request on the outstanding list so we can keep track of it */
	TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);

	if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC)) {
		status = nvmf_ctrlr_process_fabrics_cmd(req);
	} else if (spdk_unlikely(nvmf_qpair_is_admin_queue(qpair))) {
		status = nvmf_ctrlr_process_admin_cmd(req);
	} else {
		status = nvmf_ctrlr_process_io_cmd(req);
	}

	if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		_nvmf_request_complete(req);
	}
}

void
spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;

	if (qpair->ctrlr) {
		sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
		assert(sgroup != NULL);
	} else if (spdk_unlikely(nvmf_request_is_fabric_connect(req))) {
		sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
	}

	if (qpair->state != SPDK_NVMF_QPAIR_ACTIVE) {
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		/* Place the request on the outstanding list so we can keep track of it */
		TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
		/* Still increment io_outstanding because request_complete decrements it */
		if (sgroup != NULL) {
			sgroup->io_outstanding++;
		}
		_nvmf_request_complete(req);
		return;
	}

	/* Check if the subsystem is paused (if there is a subsystem) */
	if (sgroup != NULL) {
		if (sgroup->state != SPDK_NVMF_SUBSYSTEM_ACTIVE) {
			/* The subsystem is not currently active. Queue this request. */
			TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
			return;
		}
	}

	_nvmf_request_exec(req, sgroup);
}

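/*
 * Only READ, WRITE, and COMPARE commands against a valid namespace can
 * produce a DIF context; for every other opcode this returns false and the
 * transport performs no DIF insert/strip for the request.
 */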
static bool
nvmf_ctrlr_get_dif_ctx(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_cmd *cmd,
		       struct spdk_dif_ctx *dif_ctx)
{
	struct spdk_nvmf_ns *ns;
	struct spdk_bdev *bdev;

	if (ctrlr == NULL || cmd == NULL) {
		return false;
	}

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
	if (ns == NULL || ns->bdev == NULL) {
		return false;
	}

	bdev = ns->bdev;

	switch (cmd->opc) {
	case SPDK_NVME_OPC_READ:
	case SPDK_NVME_OPC_WRITE:
	case SPDK_NVME_OPC_COMPARE:
		return nvmf_bdev_ctrlr_get_dif_ctx(bdev, cmd, dif_ctx);
	default:
		break;
	}

	return false;
}

bool
spdk_nvmf_request_get_dif_ctx(struct spdk_nvmf_request *req, struct spdk_dif_ctx *dif_ctx)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	if (spdk_likely(ctrlr == NULL || !ctrlr->dif_insert_or_strip)) {
		return false;
	}

	if (spdk_unlikely(qpair->state != SPDK_NVMF_QPAIR_ACTIVE)) {
		return false;
	}

	if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC)) {
		return false;
	}

	if (spdk_unlikely(nvmf_qpair_is_admin_queue(qpair))) {
		return false;
	}

	return nvmf_ctrlr_get_dif_ctx(ctrlr, &req->cmd->nvme_cmd, dif_ctx);
}

void
spdk_nvmf_set_custom_admin_cmd_hdlr(uint8_t opc, spdk_nvmf_custom_cmd_hdlr hdlr)
{
	g_nvmf_custom_admin_cmd_hdlrs[opc].hdlr = hdlr;
}

static int
nvmf_passthru_admin_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_nvme_cmd *cmd = spdk_nvmf_request_get_cmd(req);
	struct spdk_nvme_cpl *response = spdk_nvmf_request_get_response(req);
	uint32_t bdev_nsid;
	int rc;

	if (g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].nsid == 0) {
		bdev_nsid = cmd->nsid;
	} else {
		bdev_nsid = g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].nsid;
	}

	rc = spdk_nvmf_request_get_bdev(bdev_nsid, req, &bdev, &desc, &ch);
	if (rc) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	return spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev, desc, ch, req, NULL);
}

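/*
 * Register the generic passthrough handler above for an admin opcode.  A
 * forward_nsid of 0 forwards the command to the namespace named in the
 * command itself; any other value pins it to that namespace.  For example,
 * an application could forward all Identify admin commands to namespace 1
 * with:
 *
 *     spdk_nvmf_set_passthru_admin_cmd(SPDK_NVME_OPC_IDENTIFY, 1);
 */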
void
spdk_nvmf_set_passthru_admin_cmd(uint8_t opc, uint32_t forward_nsid)
{
	g_nvmf_custom_admin_cmd_hdlrs[opc].hdlr = nvmf_passthru_admin_cmd;
	g_nvmf_custom_admin_cmd_hdlrs[opc].nsid = forward_nsid;
}

int
spdk_nvmf_request_get_bdev(uint32_t nsid, struct spdk_nvmf_request *req,
			   struct spdk_bdev **bdev, struct spdk_bdev_desc **desc, struct spdk_io_channel **ch)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_poll_group *group = req->qpair->group;
	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;

	*bdev = NULL;
	*desc = NULL;
	*ch = NULL;

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid);
	if (ns == NULL || ns->bdev == NULL) {
		return -EINVAL;
	}

	assert(group != NULL && group->sgroups != NULL);
	ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[nsid - 1];
	*bdev = ns->bdev;
	*desc = ns->desc;
	*ch = ns_info->channel;

	return 0;
}

struct spdk_nvmf_ctrlr *spdk_nvmf_request_get_ctrlr(struct spdk_nvmf_request *req)
{
	return req->qpair->ctrlr;
}

struct spdk_nvme_cmd *spdk_nvmf_request_get_cmd(struct spdk_nvmf_request *req)
{
	return &req->cmd->nvme_cmd;
}

struct spdk_nvme_cpl *spdk_nvmf_request_get_response(struct spdk_nvmf_request *req)
{
	return &req->rsp->nvme_cpl;
}

struct spdk_nvmf_subsystem *spdk_nvmf_request_get_subsystem(struct spdk_nvmf_request *req)
{
	return req->qpair->ctrlr->subsys;
}

void spdk_nvmf_request_get_data(struct spdk_nvmf_request *req, void **data, uint32_t *length)
{
	*data = req->data;
	*length = req->length;
}

struct spdk_nvmf_subsystem *spdk_nvmf_ctrlr_get_subsystem(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->subsys;
}

uint16_t spdk_nvmf_ctrlr_get_id(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->cntlid;
}

struct spdk_nvmf_request *spdk_nvmf_request_get_req_to_abort(struct spdk_nvmf_request *req)
{
	return req->req_to_abort;
}