1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "nvmf_internal.h" 37 #include "transport.h" 38 39 #include "spdk/bit_array.h" 40 #include "spdk/endian.h" 41 #include "spdk/thread.h" 42 #include "spdk/nvme_spec.h" 43 #include "spdk/nvmf_cmd.h" 44 #include "spdk/string.h" 45 #include "spdk/util.h" 46 #include "spdk/version.h" 47 #include "spdk/log.h" 48 #include "spdk_internal/usdt.h" 49 50 #define MIN_KEEP_ALIVE_TIMEOUT_IN_MS 10000 51 #define NVMF_DISC_KATO_IN_MS 120000 52 #define KAS_TIME_UNIT_IN_MS 100 53 #define KAS_DEFAULT_VALUE (MIN_KEEP_ALIVE_TIMEOUT_IN_MS / KAS_TIME_UNIT_IN_MS) 54 55 /* 56 * Report the SPDK version as the firmware revision. 57 * SPDK_VERSION_STRING won't fit into FR (only 8 bytes), so try to fit the most important parts. 
58 */ 59 #define FW_VERSION SPDK_VERSION_MAJOR_STRING SPDK_VERSION_MINOR_STRING SPDK_VERSION_PATCH_STRING 60 61 #define ANA_TRANSITION_TIME_IN_SEC 10 62 63 /* 64 * Support for custom admin command handlers 65 */ 66 struct spdk_nvmf_custom_admin_cmd { 67 spdk_nvmf_custom_cmd_hdlr hdlr; 68 uint32_t nsid; /* nsid to forward */ 69 }; 70 71 static struct spdk_nvmf_custom_admin_cmd g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_MAX_OPC + 1]; 72 73 static void _nvmf_request_complete(void *ctx); 74 75 static inline void 76 nvmf_invalid_connect_response(struct spdk_nvmf_fabric_connect_rsp *rsp, 77 uint8_t iattr, uint16_t ipo) 78 { 79 rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 80 rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM; 81 rsp->status_code_specific.invalid.iattr = iattr; 82 rsp->status_code_specific.invalid.ipo = ipo; 83 } 84 85 #define SPDK_NVMF_INVALID_CONNECT_CMD(rsp, field) \ 86 nvmf_invalid_connect_response(rsp, 0, offsetof(struct spdk_nvmf_fabric_connect_cmd, field)) 87 #define SPDK_NVMF_INVALID_CONNECT_DATA(rsp, field) \ 88 nvmf_invalid_connect_response(rsp, 1, offsetof(struct spdk_nvmf_fabric_connect_data, field)) 89 90 91 static void 92 nvmf_ctrlr_stop_keep_alive_timer(struct spdk_nvmf_ctrlr *ctrlr) 93 { 94 if (!ctrlr) { 95 SPDK_ERRLOG("Controller is NULL\n"); 96 return; 97 } 98 99 if (ctrlr->keep_alive_poller == NULL) { 100 return; 101 } 102 103 SPDK_DEBUGLOG(nvmf, "Stop keep alive poller\n"); 104 spdk_poller_unregister(&ctrlr->keep_alive_poller); 105 } 106 107 static void 108 nvmf_ctrlr_stop_association_timer(struct spdk_nvmf_ctrlr *ctrlr) 109 { 110 if (!ctrlr) { 111 SPDK_ERRLOG("Controller is NULL\n"); 112 assert(false); 113 return; 114 } 115 116 if (ctrlr->association_timer == NULL) { 117 return; 118 } 119 120 SPDK_DEBUGLOG(nvmf, "Stop association timer\n"); 121 spdk_poller_unregister(&ctrlr->association_timer); 122 } 123 124 static void 125 nvmf_ctrlr_disconnect_qpairs_done(struct spdk_io_channel_iter *i, int status) 126 { 127 if (status == 0) { 128 SPDK_DEBUGLOG(nvmf, "ctrlr disconnect qpairs complete successfully\n"); 129 } else { 130 SPDK_ERRLOG("Fail to disconnect ctrlr qpairs\n"); 131 } 132 } 133 134 static int 135 _nvmf_ctrlr_disconnect_qpairs_on_pg(struct spdk_io_channel_iter *i, bool include_admin) 136 { 137 int rc = 0; 138 struct spdk_nvmf_ctrlr *ctrlr; 139 struct spdk_nvmf_qpair *qpair, *temp_qpair; 140 struct spdk_io_channel *ch; 141 struct spdk_nvmf_poll_group *group; 142 143 ctrlr = spdk_io_channel_iter_get_ctx(i); 144 ch = spdk_io_channel_iter_get_channel(i); 145 group = spdk_io_channel_get_ctx(ch); 146 147 TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, temp_qpair) { 148 if (qpair->ctrlr == ctrlr && (include_admin || !nvmf_qpair_is_admin_queue(qpair))) { 149 rc = spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 150 if (rc) { 151 SPDK_ERRLOG("Qpair disconnect failed\n"); 152 return rc; 153 } 154 } 155 } 156 157 return rc; 158 } 159 160 static void 161 nvmf_ctrlr_disconnect_qpairs_on_pg(struct spdk_io_channel_iter *i) 162 { 163 spdk_for_each_channel_continue(i, _nvmf_ctrlr_disconnect_qpairs_on_pg(i, true)); 164 } 165 166 static void 167 nvmf_ctrlr_disconnect_io_qpairs_on_pg(struct spdk_io_channel_iter *i) 168 { 169 spdk_for_each_channel_continue(i, _nvmf_ctrlr_disconnect_qpairs_on_pg(i, false)); 170 } 171 172 static int 173 nvmf_ctrlr_keep_alive_poll(void *ctx) 174 { 175 uint64_t keep_alive_timeout_tick; 176 uint64_t now = spdk_get_ticks(); 177 struct spdk_nvmf_ctrlr *ctrlr = ctx; 178 179 SPDK_DEBUGLOG(nvmf, "Polling ctrlr keep alive timeout\n"); 180 181 /* If the 
Keep alive feature is in use and the timer expires */ 182 keep_alive_timeout_tick = ctrlr->last_keep_alive_tick + 183 ctrlr->feat.keep_alive_timer.bits.kato * spdk_get_ticks_hz() / UINT64_C(1000); 184 if (now > keep_alive_timeout_tick) { 185 SPDK_NOTICELOG("Disconnecting host %s from subsystem %s due to keep alive timeout.\n", 186 ctrlr->hostnqn, ctrlr->subsys->subnqn); 187 /* set the Controller Fatal Status bit to '1' */ 188 if (ctrlr->vcprop.csts.bits.cfs == 0) { 189 ctrlr->vcprop.csts.bits.cfs = 1; 190 191 /* 192 * disconnect qpairs, terminate Transport connection 193 * destroy ctrlr, break the host to controller association 194 * disconnect qpairs with qpair->ctrlr == ctrlr 195 */ 196 spdk_for_each_channel(ctrlr->subsys->tgt, 197 nvmf_ctrlr_disconnect_qpairs_on_pg, 198 ctrlr, 199 nvmf_ctrlr_disconnect_qpairs_done); 200 } 201 } 202 203 return SPDK_POLLER_BUSY; 204 } 205 206 static void 207 nvmf_ctrlr_start_keep_alive_timer(struct spdk_nvmf_ctrlr *ctrlr) 208 { 209 if (!ctrlr) { 210 SPDK_ERRLOG("Controller is NULL\n"); 211 return; 212 } 213 214 /* if cleared to 0 then the Keep Alive Timer is disabled */ 215 if (ctrlr->feat.keep_alive_timer.bits.kato != 0) { 216 217 ctrlr->last_keep_alive_tick = spdk_get_ticks(); 218 219 SPDK_DEBUGLOG(nvmf, "Ctrlr add keep alive poller\n"); 220 ctrlr->keep_alive_poller = SPDK_POLLER_REGISTER(nvmf_ctrlr_keep_alive_poll, ctrlr, 221 ctrlr->feat.keep_alive_timer.bits.kato * 1000); 222 } 223 } 224 225 static void 226 ctrlr_add_qpair_and_update_rsp(struct spdk_nvmf_qpair *qpair, 227 struct spdk_nvmf_ctrlr *ctrlr, 228 struct spdk_nvmf_fabric_connect_rsp *rsp) 229 { 230 assert(ctrlr->admin_qpair->group->thread == spdk_get_thread()); 231 232 /* check if we would exceed ctrlr connection limit */ 233 if (qpair->qid >= spdk_bit_array_capacity(ctrlr->qpair_mask)) { 234 SPDK_ERRLOG("Requested QID %u but Max QID is %u\n", 235 qpair->qid, spdk_bit_array_capacity(ctrlr->qpair_mask) - 1); 236 rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 237 rsp->status.sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER; 238 return; 239 } 240 241 if (spdk_bit_array_get(ctrlr->qpair_mask, qpair->qid)) { 242 SPDK_ERRLOG("Got I/O connect with duplicate QID %u\n", qpair->qid); 243 rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 244 rsp->status.sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER; 245 return; 246 } 247 248 qpair->ctrlr = ctrlr; 249 spdk_bit_array_set(ctrlr->qpair_mask, qpair->qid); 250 251 rsp->status.sc = SPDK_NVME_SC_SUCCESS; 252 rsp->status_code_specific.success.cntlid = ctrlr->cntlid; 253 SPDK_DEBUGLOG(nvmf, "connect capsule response: cntlid = 0x%04x\n", 254 rsp->status_code_specific.success.cntlid); 255 256 SPDK_DTRACE_PROBE4(nvmf_ctrlr_add_qpair, qpair, qpair->qid, ctrlr->subsys->subnqn, 257 ctrlr->hostnqn); 258 } 259 260 static void 261 _nvmf_ctrlr_add_admin_qpair(void *ctx) 262 { 263 struct spdk_nvmf_request *req = ctx; 264 struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp; 265 struct spdk_nvmf_qpair *qpair = req->qpair; 266 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; 267 268 ctrlr->admin_qpair = qpair; 269 ctrlr->association_timeout = qpair->transport->opts.association_timeout; 270 nvmf_ctrlr_start_keep_alive_timer(ctrlr); 271 ctrlr_add_qpair_and_update_rsp(qpair, ctrlr, rsp); 272 _nvmf_request_complete(req); 273 } 274 275 static void 276 _nvmf_subsystem_add_ctrlr(void *ctx) 277 { 278 struct spdk_nvmf_request *req = ctx; 279 struct spdk_nvmf_qpair *qpair = req->qpair; 280 struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp; 281 struct spdk_nvmf_ctrlr 
*ctrlr = qpair->ctrlr; 282 283 if (nvmf_subsystem_add_ctrlr(ctrlr->subsys, ctrlr)) { 284 SPDK_ERRLOG("Unable to add controller to subsystem\n"); 285 spdk_bit_array_free(&ctrlr->qpair_mask); 286 free(ctrlr); 287 qpair->ctrlr = NULL; 288 rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 289 spdk_nvmf_request_complete(req); 290 return; 291 } 292 293 spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_add_admin_qpair, req); 294 } 295 296 static void 297 nvmf_ctrlr_cdata_init(struct spdk_nvmf_transport *transport, struct spdk_nvmf_subsystem *subsystem, 298 struct spdk_nvmf_ctrlr_data *cdata) 299 { 300 cdata->kas = KAS_DEFAULT_VALUE; 301 cdata->sgls.supported = 1; 302 cdata->sgls.keyed_sgl = 1; 303 cdata->sgls.sgl_offset = 1; 304 cdata->nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16; 305 cdata->nvmf_specific.ioccsz += transport->opts.in_capsule_data_size / 16; 306 cdata->nvmf_specific.iorcsz = sizeof(struct spdk_nvme_cpl) / 16; 307 cdata->nvmf_specific.icdoff = 0; /* offset starts directly after SQE */ 308 cdata->nvmf_specific.ctrattr.ctrlr_model = SPDK_NVMF_CTRLR_MODEL_DYNAMIC; 309 cdata->nvmf_specific.msdbd = 1; 310 311 if (transport->ops->cdata_init) { 312 transport->ops->cdata_init(transport, subsystem, cdata); 313 } 314 } 315 316 static struct spdk_nvmf_ctrlr * 317 nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem, 318 struct spdk_nvmf_request *req, 319 struct spdk_nvmf_fabric_connect_cmd *connect_cmd, 320 struct spdk_nvmf_fabric_connect_data *connect_data) 321 { 322 struct spdk_nvmf_ctrlr *ctrlr; 323 struct spdk_nvmf_transport *transport; 324 struct spdk_nvme_transport_id listen_trid = {}; 325 326 ctrlr = calloc(1, sizeof(*ctrlr)); 327 if (ctrlr == NULL) { 328 SPDK_ERRLOG("Memory allocation failed\n"); 329 return NULL; 330 } 331 332 STAILQ_INIT(&ctrlr->async_events); 333 TAILQ_INIT(&ctrlr->log_head); 334 ctrlr->subsys = subsystem; 335 ctrlr->thread = req->qpair->group->thread; 336 ctrlr->disconnect_in_progress = false; 337 338 transport = req->qpair->transport; 339 ctrlr->qpair_mask = spdk_bit_array_create(transport->opts.max_qpairs_per_ctrlr); 340 if (!ctrlr->qpair_mask) { 341 SPDK_ERRLOG("Failed to allocate controller qpair mask\n"); 342 goto err_qpair_mask; 343 } 344 345 nvmf_ctrlr_cdata_init(transport, subsystem, &ctrlr->cdata); 346 347 /* 348 * KAS: This field indicates the granularity of the Keep Alive Timer in 100ms units. 349 * If this field is cleared to 0h, then Keep Alive is not supported. 350 */ 351 if (ctrlr->cdata.kas) { 352 ctrlr->feat.keep_alive_timer.bits.kato = spdk_divide_round_up(connect_cmd->kato, 353 KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS) * 354 KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS; 355 } 356 357 ctrlr->feat.async_event_configuration.bits.ns_attr_notice = 1; 358 if (ctrlr->subsys->flags.ana_reporting) { 359 ctrlr->feat.async_event_configuration.bits.ana_change_notice = 1; 360 } 361 ctrlr->feat.volatile_write_cache.bits.wce = 1; 362 363 if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) { 364 /* 365 * If keep-alive timeout is not set, discovery controllers use some 366 * arbitrary high value in order to cleanup stale discovery sessions 367 * 368 * From the 1.0a nvme-of spec: 369 * "The Keep Alive command is reserved for 370 * Discovery controllers. A transport may specify a 371 * fixed Discovery controller activity timeout value 372 * (e.g., 2 minutes). If no commands are received 373 * by a Discovery controller within that time 374 * period, the controller may perform the 375 * actions for Keep Alive Timer expiration". 
376 * 377 * From the 1.1 nvme-of spec: 378 * "A host requests an explicit persistent connection 379 * to a Discovery controller and Asynchronous Event Notifications from 380 * the Discovery controller on that persistent connection by specifying 381 * a non-zero Keep Alive Timer value in the Connect command." 382 * 383 * In case non-zero KATO is used, we enable discovery_log_change_notice 384 * otherwise we disable it and use default discovery controller KATO. 385 * KATO is in millisecond. 386 */ 387 if (ctrlr->feat.keep_alive_timer.bits.kato == 0) { 388 ctrlr->feat.keep_alive_timer.bits.kato = NVMF_DISC_KATO_IN_MS; 389 ctrlr->feat.async_event_configuration.bits.discovery_log_change_notice = 0; 390 } else { 391 ctrlr->feat.async_event_configuration.bits.discovery_log_change_notice = 1; 392 } 393 } 394 395 /* Subtract 1 for admin queue, 1 for 0's based */ 396 ctrlr->feat.number_of_queues.bits.ncqr = transport->opts.max_qpairs_per_ctrlr - 1 - 397 1; 398 ctrlr->feat.number_of_queues.bits.nsqr = transport->opts.max_qpairs_per_ctrlr - 1 - 399 1; 400 401 spdk_uuid_copy(&ctrlr->hostid, (struct spdk_uuid *)connect_data->hostid); 402 memcpy(ctrlr->hostnqn, connect_data->hostnqn, sizeof(ctrlr->hostnqn)); 403 404 ctrlr->vcprop.cap.raw = 0; 405 ctrlr->vcprop.cap.bits.cqr = 1; /* NVMe-oF specification required */ 406 ctrlr->vcprop.cap.bits.mqes = transport->opts.max_queue_depth - 407 1; /* max queue depth */ 408 ctrlr->vcprop.cap.bits.ams = 0; /* optional arb mechanisms */ 409 ctrlr->vcprop.cap.bits.to = 1; /* ready timeout - 500 msec units */ 410 ctrlr->vcprop.cap.bits.dstrd = 0; /* fixed to 0 for NVMe-oF */ 411 ctrlr->vcprop.cap.bits.css = SPDK_NVME_CAP_CSS_NVM; /* NVM command set */ 412 ctrlr->vcprop.cap.bits.mpsmin = 0; /* 2 ^ (12 + mpsmin) == 4k */ 413 ctrlr->vcprop.cap.bits.mpsmax = 0; /* 2 ^ (12 + mpsmax) == 4k */ 414 415 /* Version Supported: 1.3 */ 416 ctrlr->vcprop.vs.bits.mjr = 1; 417 ctrlr->vcprop.vs.bits.mnr = 3; 418 ctrlr->vcprop.vs.bits.ter = 0; 419 420 ctrlr->vcprop.cc.raw = 0; 421 ctrlr->vcprop.cc.bits.en = 0; /* Init controller disabled */ 422 423 ctrlr->vcprop.csts.raw = 0; 424 ctrlr->vcprop.csts.bits.rdy = 0; /* Init controller as not ready */ 425 426 SPDK_DEBUGLOG(nvmf, "cap 0x%" PRIx64 "\n", ctrlr->vcprop.cap.raw); 427 SPDK_DEBUGLOG(nvmf, "vs 0x%x\n", ctrlr->vcprop.vs.raw); 428 SPDK_DEBUGLOG(nvmf, "cc 0x%x\n", ctrlr->vcprop.cc.raw); 429 SPDK_DEBUGLOG(nvmf, "csts 0x%x\n", ctrlr->vcprop.csts.raw); 430 431 ctrlr->dif_insert_or_strip = transport->opts.dif_insert_or_strip; 432 433 if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_NVME) { 434 if (spdk_nvmf_qpair_get_listen_trid(req->qpair, &listen_trid) != 0) { 435 SPDK_ERRLOG("Could not get listener transport ID\n"); 436 goto err_listener; 437 } 438 439 ctrlr->listener = nvmf_subsystem_find_listener(ctrlr->subsys, &listen_trid); 440 if (!ctrlr->listener) { 441 SPDK_ERRLOG("Listener was not found\n"); 442 goto err_listener; 443 } 444 } 445 446 req->qpair->ctrlr = ctrlr; 447 spdk_thread_send_msg(subsystem->thread, _nvmf_subsystem_add_ctrlr, req); 448 449 return ctrlr; 450 err_listener: 451 spdk_bit_array_free(&ctrlr->qpair_mask); 452 err_qpair_mask: 453 free(ctrlr); 454 return NULL; 455 } 456 457 static void 458 _nvmf_ctrlr_destruct(void *ctx) 459 { 460 struct spdk_nvmf_ctrlr *ctrlr = ctx; 461 struct spdk_nvmf_reservation_log *log, *log_tmp; 462 struct spdk_nvmf_async_event_completion *event, *event_tmp; 463 464 if (ctrlr->disconnect_in_progress) { 465 SPDK_ERRLOG("freeing ctrlr with disconnect in progress\n"); 466 
spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_destruct, ctrlr); 467 return; 468 } 469 470 nvmf_ctrlr_stop_keep_alive_timer(ctrlr); 471 nvmf_ctrlr_stop_association_timer(ctrlr); 472 spdk_bit_array_free(&ctrlr->qpair_mask); 473 474 TAILQ_FOREACH_SAFE(log, &ctrlr->log_head, link, log_tmp) { 475 TAILQ_REMOVE(&ctrlr->log_head, log, link); 476 free(log); 477 } 478 STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) { 479 STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link); 480 free(event); 481 } 482 free(ctrlr); 483 } 484 485 void 486 nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr) 487 { 488 nvmf_subsystem_remove_ctrlr(ctrlr->subsys, ctrlr); 489 490 spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_destruct, ctrlr); 491 } 492 493 static void 494 nvmf_ctrlr_add_io_qpair(void *ctx) 495 { 496 struct spdk_nvmf_request *req = ctx; 497 struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp; 498 struct spdk_nvmf_qpair *qpair = req->qpair; 499 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; 500 struct spdk_nvmf_qpair *admin_qpair = ctrlr->admin_qpair; 501 502 /* Unit test will check qpair->ctrlr after calling spdk_nvmf_ctrlr_connect. 503 * For error case, the value should be NULL. So set it to NULL at first. 504 */ 505 qpair->ctrlr = NULL; 506 507 /* Make sure the controller is not being destroyed. */ 508 if (ctrlr->in_destruct) { 509 SPDK_ERRLOG("Got I/O connect while ctrlr was being destroyed.\n"); 510 SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid); 511 goto end; 512 } 513 514 if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) { 515 SPDK_ERRLOG("I/O connect not allowed on discovery controller\n"); 516 SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid); 517 goto end; 518 } 519 520 if (!ctrlr->vcprop.cc.bits.en) { 521 SPDK_ERRLOG("Got I/O connect before ctrlr was enabled\n"); 522 SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid); 523 goto end; 524 } 525 526 if (1u << ctrlr->vcprop.cc.bits.iosqes != sizeof(struct spdk_nvme_cmd)) { 527 SPDK_ERRLOG("Got I/O connect with invalid IOSQES %u\n", 528 ctrlr->vcprop.cc.bits.iosqes); 529 SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid); 530 goto end; 531 } 532 533 if (1u << ctrlr->vcprop.cc.bits.iocqes != sizeof(struct spdk_nvme_cpl)) { 534 SPDK_ERRLOG("Got I/O connect with invalid IOCQES %u\n", 535 ctrlr->vcprop.cc.bits.iocqes); 536 SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid); 537 goto end; 538 } 539 540 if (admin_qpair->state != SPDK_NVMF_QPAIR_ACTIVE || admin_qpair->group == NULL) { 541 /* There is a chance that admin qpair is being destroyed at this moment due to e.g. 542 * expired keep alive timer. 
Part of the qpair destruction process is change of qpair's 543 * state to DEACTIVATING and removing it from poll group */ 544 SPDK_ERRLOG("Inactive admin qpair (state %d, group %p)\n", admin_qpair->state, admin_qpair->group); 545 SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid); 546 goto end; 547 } 548 549 ctrlr_add_qpair_and_update_rsp(qpair, ctrlr, rsp); 550 end: 551 spdk_nvmf_request_complete(req); 552 } 553 554 static void 555 _nvmf_ctrlr_add_io_qpair(void *ctx) 556 { 557 struct spdk_nvmf_request *req = ctx; 558 struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp; 559 struct spdk_nvmf_fabric_connect_data *data = req->data; 560 struct spdk_nvmf_ctrlr *ctrlr; 561 struct spdk_nvmf_qpair *qpair = req->qpair; 562 struct spdk_nvmf_qpair *admin_qpair; 563 struct spdk_nvmf_tgt *tgt = qpair->transport->tgt; 564 struct spdk_nvmf_subsystem *subsystem; 565 struct spdk_nvme_transport_id listen_trid = {}; 566 const struct spdk_nvmf_subsystem_listener *listener; 567 568 SPDK_DEBUGLOG(nvmf, "Connect I/O Queue for controller id 0x%x\n", data->cntlid); 569 570 subsystem = spdk_nvmf_tgt_find_subsystem(tgt, data->subnqn); 571 /* We already checked this in spdk_nvmf_ctrlr_connect */ 572 assert(subsystem != NULL); 573 574 ctrlr = nvmf_subsystem_get_ctrlr(subsystem, data->cntlid); 575 if (ctrlr == NULL) { 576 SPDK_ERRLOG("Unknown controller ID 0x%x\n", data->cntlid); 577 SPDK_NVMF_INVALID_CONNECT_DATA(rsp, cntlid); 578 spdk_nvmf_request_complete(req); 579 return; 580 } 581 582 /* fail before passing a message to the controller thread. */ 583 if (ctrlr->in_destruct) { 584 SPDK_ERRLOG("Got I/O connect while ctrlr was being destroyed.\n"); 585 SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid); 586 spdk_nvmf_request_complete(req); 587 return; 588 } 589 590 /* If ANA reporting is enabled, check if I/O connect is on the same listener. */ 591 if (subsystem->flags.ana_reporting) { 592 if (spdk_nvmf_qpair_get_listen_trid(req->qpair, &listen_trid) != 0) { 593 SPDK_ERRLOG("Could not get listener transport ID\n"); 594 SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid); 595 spdk_nvmf_request_complete(req); 596 return; 597 } 598 599 listener = nvmf_subsystem_find_listener(subsystem, &listen_trid); 600 if (listener != ctrlr->listener) { 601 SPDK_ERRLOG("I/O connect is on a listener different from admin connect\n"); 602 SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid); 603 spdk_nvmf_request_complete(req); 604 return; 605 } 606 } 607 608 admin_qpair = ctrlr->admin_qpair; 609 if (admin_qpair->state != SPDK_NVMF_QPAIR_ACTIVE || admin_qpair->group == NULL) { 610 /* There is a chance that admin qpair is being destroyed at this moment due to e.g. 611 * expired keep alive timer. 
Part of the qpair destruction process is change of qpair's 612 * state to DEACTIVATING and removing it from poll group */ 613 SPDK_ERRLOG("Inactive admin qpair (state %d, group %p)\n", admin_qpair->state, admin_qpair->group); 614 SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid); 615 spdk_nvmf_request_complete(req); 616 return; 617 } 618 qpair->ctrlr = ctrlr; 619 spdk_thread_send_msg(admin_qpair->group->thread, nvmf_ctrlr_add_io_qpair, req); 620 } 621 622 static bool 623 nvmf_qpair_access_allowed(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_subsystem *subsystem, 624 const char *hostnqn) 625 { 626 struct spdk_nvme_transport_id listen_trid = {}; 627 628 if (!spdk_nvmf_subsystem_host_allowed(subsystem, hostnqn)) { 629 SPDK_ERRLOG("Subsystem '%s' does not allow host '%s'\n", subsystem->subnqn, hostnqn); 630 return false; 631 } 632 633 if (spdk_nvmf_qpair_get_listen_trid(qpair, &listen_trid)) { 634 SPDK_ERRLOG("Subsystem '%s' is unable to enforce access control due to an internal error.\n", 635 subsystem->subnqn); 636 return false; 637 } 638 639 if (!spdk_nvmf_subsystem_listener_allowed(subsystem, &listen_trid)) { 640 SPDK_ERRLOG("Subsystem '%s' does not allow host '%s' to connect at this address.\n", 641 subsystem->subnqn, hostnqn); 642 return false; 643 } 644 645 return true; 646 } 647 648 static int 649 _nvmf_ctrlr_connect(struct spdk_nvmf_request *req) 650 { 651 struct spdk_nvmf_fabric_connect_data *data = req->data; 652 struct spdk_nvmf_fabric_connect_cmd *cmd = &req->cmd->connect_cmd; 653 struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp; 654 struct spdk_nvmf_qpair *qpair = req->qpair; 655 struct spdk_nvmf_transport *transport = qpair->transport; 656 struct spdk_nvmf_ctrlr *ctrlr; 657 struct spdk_nvmf_subsystem *subsystem; 658 659 SPDK_DEBUGLOG(nvmf, "recfmt 0x%x qid %u sqsize %u\n", 660 cmd->recfmt, cmd->qid, cmd->sqsize); 661 662 SPDK_DEBUGLOG(nvmf, "Connect data:\n"); 663 SPDK_DEBUGLOG(nvmf, " cntlid: 0x%04x\n", data->cntlid); 664 SPDK_DEBUGLOG(nvmf, " hostid: %08x-%04x-%04x-%02x%02x-%04x%08x ***\n", 665 ntohl(*(uint32_t *)&data->hostid[0]), 666 ntohs(*(uint16_t *)&data->hostid[4]), 667 ntohs(*(uint16_t *)&data->hostid[6]), 668 data->hostid[8], 669 data->hostid[9], 670 ntohs(*(uint16_t *)&data->hostid[10]), 671 ntohl(*(uint32_t *)&data->hostid[12])); 672 SPDK_DEBUGLOG(nvmf, " subnqn: \"%s\"\n", data->subnqn); 673 SPDK_DEBUGLOG(nvmf, " hostnqn: \"%s\"\n", data->hostnqn); 674 675 subsystem = spdk_nvmf_tgt_find_subsystem(transport->tgt, data->subnqn); 676 if (!subsystem) { 677 SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn); 678 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 679 } 680 681 if (cmd->recfmt != 0) { 682 SPDK_ERRLOG("Connect command unsupported RECFMT %u\n", cmd->recfmt); 683 rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 684 rsp->status.sc = SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT; 685 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 686 } 687 688 /* 689 * SQSIZE is a 0-based value, so it must be at least 1 (minimum queue depth is 2) and 690 * strictly less than max_aq_depth (admin queues) or max_queue_depth (io queues). 
691 */ 692 if (cmd->sqsize == 0) { 693 SPDK_ERRLOG("Invalid SQSIZE = 0\n"); 694 SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize); 695 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 696 } 697 698 if (cmd->qid == 0) { 699 if (cmd->sqsize >= transport->opts.max_aq_depth) { 700 SPDK_ERRLOG("Invalid SQSIZE for admin queue %u (min 1, max %u)\n", 701 cmd->sqsize, transport->opts.max_aq_depth - 1); 702 SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize); 703 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 704 } 705 } else if (cmd->sqsize >= transport->opts.max_queue_depth) { 706 SPDK_ERRLOG("Invalid SQSIZE %u (min 1, max %u)\n", 707 cmd->sqsize, transport->opts.max_queue_depth - 1); 708 SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize); 709 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 710 } 711 712 qpair->sq_head_max = cmd->sqsize; 713 qpair->qid = cmd->qid; 714 715 if (0 == qpair->qid) { 716 qpair->group->stat.admin_qpairs++; 717 qpair->group->stat.current_admin_qpairs++; 718 } else { 719 qpair->group->stat.io_qpairs++; 720 qpair->group->stat.current_io_qpairs++; 721 } 722 723 if (cmd->qid == 0) { 724 SPDK_DEBUGLOG(nvmf, "Connect Admin Queue for controller ID 0x%x\n", data->cntlid); 725 726 if (data->cntlid != 0xFFFF) { 727 /* This NVMf target only supports dynamic mode. */ 728 SPDK_ERRLOG("The NVMf target only supports dynamic mode (CNTLID = 0x%x).\n", data->cntlid); 729 SPDK_NVMF_INVALID_CONNECT_DATA(rsp, cntlid); 730 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 731 } 732 733 /* Establish a new ctrlr */ 734 ctrlr = nvmf_ctrlr_create(subsystem, req, cmd, data); 735 if (!ctrlr) { 736 SPDK_ERRLOG("nvmf_ctrlr_create() failed\n"); 737 rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 738 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 739 } else { 740 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 741 } 742 } else { 743 spdk_thread_send_msg(subsystem->thread, _nvmf_ctrlr_add_io_qpair, req); 744 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 745 } 746 } 747 748 static inline bool 749 nvmf_request_is_fabric_connect(struct spdk_nvmf_request *req) 750 { 751 return req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC && 752 req->cmd->nvmf_cmd.fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT; 753 } 754 755 static struct spdk_nvmf_subsystem_poll_group * 756 nvmf_subsystem_pg_from_connect_cmd(struct spdk_nvmf_request *req) 757 { 758 struct spdk_nvmf_fabric_connect_data *data; 759 struct spdk_nvmf_subsystem *subsystem; 760 struct spdk_nvmf_tgt *tgt; 761 762 assert(nvmf_request_is_fabric_connect(req)); 763 assert(req->qpair->ctrlr == NULL); 764 765 data = req->data; 766 tgt = req->qpair->transport->tgt; 767 768 subsystem = spdk_nvmf_tgt_find_subsystem(tgt, data->subnqn); 769 if (subsystem == NULL) { 770 return NULL; 771 } 772 773 return &req->qpair->group->sgroups[subsystem->id]; 774 } 775 776 int 777 spdk_nvmf_ctrlr_connect(struct spdk_nvmf_request *req) 778 { 779 struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp; 780 struct spdk_nvmf_qpair *qpair = req->qpair; 781 struct spdk_nvmf_subsystem_poll_group *sgroup; 782 enum spdk_nvmf_request_exec_status status; 783 784 sgroup = nvmf_subsystem_pg_from_connect_cmd(req); 785 if (!sgroup) { 786 SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn); 787 status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 788 goto out; 789 } 790 791 sgroup->mgmt_io_outstanding++; 792 TAILQ_INSERT_TAIL(&qpair->outstanding, req, link); 793 794 status = _nvmf_ctrlr_connect(req); 795 796 out: 797 if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) { 798 _nvmf_request_complete(req); 799 } 800 801 
return status; 802 } 803 804 static int nvmf_ctrlr_cmd_connect(struct spdk_nvmf_request *req); 805 806 static int 807 retry_connect(void *arg) 808 { 809 struct spdk_nvmf_request *req = arg; 810 struct spdk_nvmf_subsystem_poll_group *sgroup; 811 int rc; 812 813 sgroup = nvmf_subsystem_pg_from_connect_cmd(req); 814 assert(sgroup != NULL); 815 sgroup->mgmt_io_outstanding++; 816 spdk_poller_unregister(&req->poller); 817 rc = nvmf_ctrlr_cmd_connect(req); 818 if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) { 819 _nvmf_request_complete(req); 820 } 821 return SPDK_POLLER_BUSY; 822 } 823 824 static int 825 nvmf_ctrlr_cmd_connect(struct spdk_nvmf_request *req) 826 { 827 struct spdk_nvmf_fabric_connect_data *data = req->data; 828 struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp; 829 struct spdk_nvmf_transport *transport = req->qpair->transport; 830 struct spdk_nvmf_subsystem *subsystem; 831 832 if (req->length < sizeof(struct spdk_nvmf_fabric_connect_data)) { 833 SPDK_ERRLOG("Connect command data length 0x%x too small\n", req->length); 834 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 835 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 836 } 837 838 subsystem = spdk_nvmf_tgt_find_subsystem(transport->tgt, data->subnqn); 839 if (!subsystem) { 840 SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn); 841 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 842 } 843 844 if ((subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE) || 845 (subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSING) || 846 (subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED) || 847 (subsystem->state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING)) { 848 struct spdk_nvmf_subsystem_poll_group *sgroup; 849 850 if (req->timeout_tsc == 0) { 851 /* We will only retry the request up to 1 second. */ 852 req->timeout_tsc = spdk_get_ticks() + spdk_get_ticks_hz(); 853 } else if (spdk_get_ticks() > req->timeout_tsc) { 854 SPDK_ERRLOG("Subsystem '%s' was not ready for 1 second\n", subsystem->subnqn); 855 rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 856 rsp->status.sc = SPDK_NVMF_FABRIC_SC_CONTROLLER_BUSY; 857 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 858 } 859 860 /* Subsystem is not ready to handle a connect. Use a poller to retry it 861 * again later. Decrement the mgmt_io_outstanding to avoid the 862 * subsystem waiting for this command to complete before unpausing. 
863 */ 864 sgroup = nvmf_subsystem_pg_from_connect_cmd(req); 865 assert(sgroup != NULL); 866 sgroup->mgmt_io_outstanding--; 867 SPDK_DEBUGLOG(nvmf, "Subsystem '%s' is not ready for connect, retrying...\n", subsystem->subnqn); 868 req->poller = SPDK_POLLER_REGISTER(retry_connect, req, 100); 869 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 870 } 871 872 /* Ensure that hostnqn is null terminated */ 873 if (!memchr(data->hostnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1)) { 874 SPDK_ERRLOG("Connect HOSTNQN is not null terminated\n"); 875 SPDK_NVMF_INVALID_CONNECT_DATA(rsp, hostnqn); 876 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 877 } 878 879 if (!nvmf_qpair_access_allowed(req->qpair, subsystem, data->hostnqn)) { 880 rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 881 rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_HOST; 882 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 883 } 884 885 return _nvmf_ctrlr_connect(req); 886 } 887 888 static int 889 nvmf_ctrlr_association_remove(void *ctx) 890 { 891 struct spdk_nvmf_ctrlr *ctrlr = ctx; 892 int rc; 893 894 SPDK_DEBUGLOG(nvmf, "Disconnecting host from subsystem %s due to association timeout.\n", 895 ctrlr->subsys->subnqn); 896 897 rc = spdk_nvmf_qpair_disconnect(ctrlr->admin_qpair, NULL, NULL); 898 if (rc < 0) { 899 SPDK_ERRLOG("Fail to disconnect admin ctrlr qpair\n"); 900 assert(false); 901 } 902 903 nvmf_ctrlr_stop_association_timer(ctrlr); 904 return 1; 905 } 906 907 static void 908 nvmf_ctrlr_cc_shn_done(struct spdk_io_channel_iter *i, int status) 909 { 910 struct spdk_nvmf_ctrlr *ctrlr = spdk_io_channel_iter_get_ctx(i); 911 912 if (status < 0) { 913 SPDK_ERRLOG("Fail to disconnect io ctrlr qpairs\n"); 914 assert(false); 915 } 916 917 ctrlr->vcprop.csts.bits.shst = SPDK_NVME_SHST_COMPLETE; 918 919 /* After CC.EN transitions to 0 (due to shutdown or reset), the association 920 * between the host and controller shall be preserved for at least 2 minutes */ 921 if (ctrlr->association_timer) { 922 SPDK_DEBUGLOG(nvmf, "Association timer already set\n"); 923 nvmf_ctrlr_stop_association_timer(ctrlr); 924 } 925 if (ctrlr->association_timeout) { 926 ctrlr->association_timer = SPDK_POLLER_REGISTER(nvmf_ctrlr_association_remove, ctrlr, 927 ctrlr->association_timeout * 1000); 928 } 929 ctrlr->disconnect_in_progress = false; 930 } 931 932 static void 933 nvmf_ctrlr_cc_reset_done(struct spdk_io_channel_iter *i, int status) 934 { 935 struct spdk_nvmf_ctrlr *ctrlr = spdk_io_channel_iter_get_ctx(i); 936 937 if (status < 0) { 938 SPDK_ERRLOG("Fail to disconnect io ctrlr qpairs\n"); 939 assert(false); 940 } 941 942 /* Only a subset of the registers are cleared out on a reset */ 943 ctrlr->vcprop.cc.raw = 0; 944 ctrlr->vcprop.csts.raw = 0; 945 946 /* After CC.EN transitions to 0 (due to shutdown or reset), the association 947 * between the host and controller shall be preserved for at least 2 minutes */ 948 if (ctrlr->association_timer) { 949 SPDK_DEBUGLOG(nvmf, "Association timer already set\n"); 950 nvmf_ctrlr_stop_association_timer(ctrlr); 951 } 952 if (ctrlr->association_timeout) { 953 ctrlr->association_timer = SPDK_POLLER_REGISTER(nvmf_ctrlr_association_remove, ctrlr, 954 ctrlr->association_timeout * 1000); 955 } 956 ctrlr->disconnect_in_progress = false; 957 } 958 959 const struct spdk_nvmf_registers * 960 spdk_nvmf_ctrlr_get_regs(struct spdk_nvmf_ctrlr *ctrlr) 961 { 962 return &ctrlr->vcprop; 963 } 964 965 static uint64_t 966 nvmf_prop_get_cap(struct spdk_nvmf_ctrlr *ctrlr) 967 { 968 return ctrlr->vcprop.cap.raw; 969 } 970 971 static uint64_t 972 
nvmf_prop_get_vs(struct spdk_nvmf_ctrlr *ctrlr) 973 { 974 return ctrlr->vcprop.vs.raw; 975 } 976 977 static uint64_t 978 nvmf_prop_get_cc(struct spdk_nvmf_ctrlr *ctrlr) 979 { 980 return ctrlr->vcprop.cc.raw; 981 } 982 983 static bool 984 nvmf_prop_set_cc(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value) 985 { 986 union spdk_nvme_cc_register cc, diff; 987 988 cc.raw = value; 989 990 SPDK_DEBUGLOG(nvmf, "cur CC: 0x%08x\n", ctrlr->vcprop.cc.raw); 991 SPDK_DEBUGLOG(nvmf, "new CC: 0x%08x\n", cc.raw); 992 993 /* 994 * Calculate which bits changed between the current and new CC. 995 * Mark each bit as 0 once it is handled to determine if any unhandled bits were changed. 996 */ 997 diff.raw = cc.raw ^ ctrlr->vcprop.cc.raw; 998 999 if (diff.bits.en) { 1000 if (cc.bits.en) { 1001 SPDK_DEBUGLOG(nvmf, "Property Set CC Enable!\n"); 1002 nvmf_ctrlr_stop_association_timer(ctrlr); 1003 1004 ctrlr->vcprop.cc.bits.en = 1; 1005 ctrlr->vcprop.csts.bits.rdy = 1; 1006 } else { 1007 SPDK_DEBUGLOG(nvmf, "Property Set CC Disable!\n"); 1008 ctrlr->vcprop.cc.bits.en = 0; 1009 ctrlr->disconnect_in_progress = true; 1010 spdk_for_each_channel(ctrlr->subsys->tgt, 1011 nvmf_ctrlr_disconnect_io_qpairs_on_pg, 1012 ctrlr, 1013 nvmf_ctrlr_cc_reset_done); 1014 } 1015 diff.bits.en = 0; 1016 } 1017 1018 if (diff.bits.shn) { 1019 if (cc.bits.shn == SPDK_NVME_SHN_NORMAL || 1020 cc.bits.shn == SPDK_NVME_SHN_ABRUPT) { 1021 SPDK_DEBUGLOG(nvmf, "Property Set CC Shutdown %u%ub!\n", 1022 cc.bits.shn >> 1, cc.bits.shn & 1); 1023 ctrlr->vcprop.cc.bits.shn = cc.bits.shn; 1024 ctrlr->disconnect_in_progress = true; 1025 spdk_for_each_channel(ctrlr->subsys->tgt, 1026 nvmf_ctrlr_disconnect_io_qpairs_on_pg, 1027 ctrlr, 1028 nvmf_ctrlr_cc_shn_done); 1029 1030 /* From the time a shutdown is initiated the controller shall disable 1031 * Keep Alive timer */ 1032 nvmf_ctrlr_stop_keep_alive_timer(ctrlr); 1033 } else if (cc.bits.shn == 0) { 1034 ctrlr->vcprop.cc.bits.shn = 0; 1035 } else { 1036 SPDK_ERRLOG("Prop Set CC: Invalid SHN value %u%ub\n", 1037 cc.bits.shn >> 1, cc.bits.shn & 1); 1038 return false; 1039 } 1040 diff.bits.shn = 0; 1041 } 1042 1043 if (diff.bits.iosqes) { 1044 SPDK_DEBUGLOG(nvmf, "Prop Set IOSQES = %u (%u bytes)\n", 1045 cc.bits.iosqes, 1u << cc.bits.iosqes); 1046 ctrlr->vcprop.cc.bits.iosqes = cc.bits.iosqes; 1047 diff.bits.iosqes = 0; 1048 } 1049 1050 if (diff.bits.iocqes) { 1051 SPDK_DEBUGLOG(nvmf, "Prop Set IOCQES = %u (%u bytes)\n", 1052 cc.bits.iocqes, 1u << cc.bits.iocqes); 1053 ctrlr->vcprop.cc.bits.iocqes = cc.bits.iocqes; 1054 diff.bits.iocqes = 0; 1055 } 1056 1057 if (diff.bits.ams) { 1058 SPDK_ERRLOG("Arbitration Mechanism Selected (AMS) 0x%x not supported!\n", cc.bits.ams); 1059 return false; 1060 } 1061 1062 if (diff.bits.mps) { 1063 SPDK_ERRLOG("Memory Page Size (MPS) %u KiB not supported!\n", (1 << (2 + cc.bits.mps))); 1064 return false; 1065 } 1066 1067 if (diff.bits.css) { 1068 SPDK_ERRLOG("I/O Command Set Selected (CSS) 0x%x not supported!\n", cc.bits.css); 1069 return false; 1070 } 1071 1072 if (diff.raw != 0) { 1073 /* Print an error message, but don't fail the command in this case. 1074 * If we did want to fail in this case, we'd need to ensure we acted 1075 * on no other bits or the initiator gets confused. 
*/ 1076 SPDK_ERRLOG("Prop Set CC toggled reserved bits 0x%x!\n", diff.raw); 1077 } 1078 1079 return true; 1080 } 1081 1082 static uint64_t 1083 nvmf_prop_get_csts(struct spdk_nvmf_ctrlr *ctrlr) 1084 { 1085 return ctrlr->vcprop.csts.raw; 1086 } 1087 1088 static uint64_t 1089 nvmf_prop_get_aqa(struct spdk_nvmf_ctrlr *ctrlr) 1090 { 1091 return ctrlr->vcprop.aqa.raw; 1092 } 1093 1094 static bool 1095 nvmf_prop_set_aqa(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value) 1096 { 1097 union spdk_nvme_aqa_register aqa; 1098 1099 aqa.raw = value; 1100 1101 if (aqa.bits.asqs < SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES - 1 || 1102 aqa.bits.acqs < SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES - 1 || 1103 aqa.bits.reserved1 != 0 || aqa.bits.reserved2 != 0) { 1104 return false; 1105 } 1106 1107 ctrlr->vcprop.aqa.raw = value; 1108 1109 return true; 1110 } 1111 1112 static uint64_t 1113 nvmf_prop_get_asq(struct spdk_nvmf_ctrlr *ctrlr) 1114 { 1115 return ctrlr->vcprop.asq; 1116 } 1117 1118 static bool 1119 nvmf_prop_set_asq_lower(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value) 1120 { 1121 ctrlr->vcprop.asq = (ctrlr->vcprop.asq & (0xFFFFFFFFULL << 32ULL)) | value; 1122 1123 return true; 1124 } 1125 1126 static bool 1127 nvmf_prop_set_asq_upper(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value) 1128 { 1129 ctrlr->vcprop.asq = (ctrlr->vcprop.asq & 0xFFFFFFFFULL) | ((uint64_t)value << 32ULL); 1130 1131 return true; 1132 } 1133 1134 static uint64_t 1135 nvmf_prop_get_acq(struct spdk_nvmf_ctrlr *ctrlr) 1136 { 1137 return ctrlr->vcprop.acq; 1138 } 1139 1140 static bool 1141 nvmf_prop_set_acq_lower(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value) 1142 { 1143 ctrlr->vcprop.acq = (ctrlr->vcprop.acq & (0xFFFFFFFFULL << 32ULL)) | value; 1144 1145 return true; 1146 } 1147 1148 static bool 1149 nvmf_prop_set_acq_upper(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value) 1150 { 1151 ctrlr->vcprop.acq = (ctrlr->vcprop.acq & 0xFFFFFFFFULL) | ((uint64_t)value << 32ULL); 1152 1153 return true; 1154 } 1155 1156 struct nvmf_prop { 1157 uint32_t ofst; 1158 uint8_t size; 1159 char name[11]; 1160 uint64_t (*get_cb)(struct spdk_nvmf_ctrlr *ctrlr); 1161 bool (*set_cb)(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value); 1162 bool (*set_upper_cb)(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value); 1163 }; 1164 1165 #define PROP(field, size, get_cb, set_cb, set_upper_cb) \ 1166 { \ 1167 offsetof(struct spdk_nvme_registers, field), \ 1168 size, \ 1169 #field, \ 1170 get_cb, set_cb, set_upper_cb \ 1171 } 1172 1173 static const struct nvmf_prop nvmf_props[] = { 1174 PROP(cap, 8, nvmf_prop_get_cap, NULL, NULL), 1175 PROP(vs, 4, nvmf_prop_get_vs, NULL, NULL), 1176 PROP(cc, 4, nvmf_prop_get_cc, nvmf_prop_set_cc, NULL), 1177 PROP(csts, 4, nvmf_prop_get_csts, NULL, NULL), 1178 PROP(aqa, 4, nvmf_prop_get_aqa, nvmf_prop_set_aqa, NULL), 1179 PROP(asq, 8, nvmf_prop_get_asq, nvmf_prop_set_asq_lower, nvmf_prop_set_asq_upper), 1180 PROP(acq, 8, nvmf_prop_get_acq, nvmf_prop_set_acq_lower, nvmf_prop_set_acq_upper), 1181 }; 1182 1183 static const struct nvmf_prop * 1184 find_prop(uint32_t ofst, uint8_t size) 1185 { 1186 size_t i; 1187 1188 for (i = 0; i < SPDK_COUNTOF(nvmf_props); i++) { 1189 const struct nvmf_prop *prop = &nvmf_props[i]; 1190 1191 if ((ofst >= prop->ofst) && (ofst + size <= prop->ofst + prop->size)) { 1192 return prop; 1193 } 1194 } 1195 1196 return NULL; 1197 } 1198 1199 static int 1200 nvmf_property_get(struct spdk_nvmf_request *req) 1201 { 1202 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1203 struct spdk_nvmf_fabric_prop_get_cmd *cmd = &req->cmd->prop_get_cmd; 1204 
struct spdk_nvmf_fabric_prop_get_rsp *response = &req->rsp->prop_get_rsp; 1205 const struct nvmf_prop *prop; 1206 uint8_t size; 1207 1208 response->status.sc = 0; 1209 response->value.u64 = 0; 1210 1211 SPDK_DEBUGLOG(nvmf, "size %d, offset 0x%x\n", 1212 cmd->attrib.size, cmd->ofst); 1213 1214 switch (cmd->attrib.size) { 1215 case SPDK_NVMF_PROP_SIZE_4: 1216 size = 4; 1217 break; 1218 case SPDK_NVMF_PROP_SIZE_8: 1219 size = 8; 1220 break; 1221 default: 1222 SPDK_ERRLOG("Invalid size value %d\n", cmd->attrib.size); 1223 response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 1224 response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM; 1225 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1226 } 1227 1228 prop = find_prop(cmd->ofst, size); 1229 if (prop == NULL || prop->get_cb == NULL) { 1230 response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 1231 response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM; 1232 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1233 } 1234 1235 SPDK_DEBUGLOG(nvmf, "name: %s\n", prop->name); 1236 1237 response->value.u64 = prop->get_cb(ctrlr); 1238 1239 if (size != prop->size) { 1240 /* The size must be 4 and the prop->size is 8. Figure out which part of the property to read. */ 1241 assert(size == 4); 1242 assert(prop->size == 8); 1243 1244 if (cmd->ofst == prop->ofst) { 1245 /* Keep bottom 4 bytes only */ 1246 response->value.u64 &= 0xFFFFFFFF; 1247 } else { 1248 /* Keep top 4 bytes only */ 1249 response->value.u64 >>= 32; 1250 } 1251 } 1252 1253 SPDK_DEBUGLOG(nvmf, "response value: 0x%" PRIx64 "\n", response->value.u64); 1254 1255 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1256 } 1257 1258 static int 1259 nvmf_property_set(struct spdk_nvmf_request *req) 1260 { 1261 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1262 struct spdk_nvmf_fabric_prop_set_cmd *cmd = &req->cmd->prop_set_cmd; 1263 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 1264 const struct nvmf_prop *prop; 1265 uint64_t value; 1266 uint8_t size; 1267 bool ret; 1268 1269 SPDK_DEBUGLOG(nvmf, "size %d, offset 0x%x, value 0x%" PRIx64 "\n", 1270 cmd->attrib.size, cmd->ofst, cmd->value.u64); 1271 1272 switch (cmd->attrib.size) { 1273 case SPDK_NVMF_PROP_SIZE_4: 1274 size = 4; 1275 break; 1276 case SPDK_NVMF_PROP_SIZE_8: 1277 size = 8; 1278 break; 1279 default: 1280 SPDK_ERRLOG("Invalid size value %d\n", cmd->attrib.size); 1281 response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 1282 response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM; 1283 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1284 } 1285 1286 prop = find_prop(cmd->ofst, size); 1287 if (prop == NULL || prop->set_cb == NULL) { 1288 SPDK_ERRLOG("Invalid offset 0x%x\n", cmd->ofst); 1289 response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 1290 response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM; 1291 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1292 } 1293 1294 SPDK_DEBUGLOG(nvmf, "name: %s\n", prop->name); 1295 1296 value = cmd->value.u64; 1297 1298 if (prop->size == 4) { 1299 ret = prop->set_cb(ctrlr, (uint32_t)value); 1300 } else if (size != prop->size) { 1301 /* The size must be 4 and the prop->size is 8. Figure out which part of the property to write. 
*/ 1302 assert(size == 4); 1303 assert(prop->size == 8); 1304 1305 if (cmd->ofst == prop->ofst) { 1306 ret = prop->set_cb(ctrlr, (uint32_t)value); 1307 } else { 1308 ret = prop->set_upper_cb(ctrlr, (uint32_t)value); 1309 } 1310 } else { 1311 ret = prop->set_cb(ctrlr, (uint32_t)value); 1312 if (ret) { 1313 ret = prop->set_upper_cb(ctrlr, (uint32_t)(value >> 32)); 1314 } 1315 } 1316 1317 if (!ret) { 1318 SPDK_ERRLOG("prop set_cb failed\n"); 1319 response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 1320 response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM; 1321 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1322 } 1323 1324 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1325 } 1326 1327 static int 1328 nvmf_ctrlr_set_features_arbitration(struct spdk_nvmf_request *req) 1329 { 1330 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1331 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1332 1333 SPDK_DEBUGLOG(nvmf, "Set Features - Arbitration (cdw11 = 0x%0x)\n", cmd->cdw11); 1334 1335 ctrlr->feat.arbitration.raw = cmd->cdw11; 1336 ctrlr->feat.arbitration.bits.reserved = 0; 1337 1338 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1339 } 1340 1341 static int 1342 nvmf_ctrlr_set_features_power_management(struct spdk_nvmf_request *req) 1343 { 1344 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1345 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1346 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1347 1348 SPDK_DEBUGLOG(nvmf, "Set Features - Power Management (cdw11 = 0x%0x)\n", cmd->cdw11); 1349 1350 /* Only PS = 0 is allowed, since we report NPSS = 0 */ 1351 if (cmd->cdw11_bits.feat_power_management.bits.ps != 0) { 1352 SPDK_ERRLOG("Invalid power state %u\n", cmd->cdw11_bits.feat_power_management.bits.ps); 1353 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 1354 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1355 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1356 } 1357 1358 ctrlr->feat.power_management.raw = cmd->cdw11; 1359 ctrlr->feat.power_management.bits.reserved = 0; 1360 1361 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1362 } 1363 1364 static bool 1365 temp_threshold_opts_valid(const union spdk_nvme_feat_temperature_threshold *opts) 1366 { 1367 /* 1368 * Valid TMPSEL values: 1369 * 0000b - 1000b: temperature sensors 1370 * 1111b: set all implemented temperature sensors 1371 */ 1372 if (opts->bits.tmpsel >= 9 && opts->bits.tmpsel != 15) { 1373 /* 1001b - 1110b: reserved */ 1374 SPDK_ERRLOG("Invalid TMPSEL %u\n", opts->bits.tmpsel); 1375 return false; 1376 } 1377 1378 /* 1379 * Valid THSEL values: 1380 * 00b: over temperature threshold 1381 * 01b: under temperature threshold 1382 */ 1383 if (opts->bits.thsel > 1) { 1384 /* 10b - 11b: reserved */ 1385 SPDK_ERRLOG("Invalid THSEL %u\n", opts->bits.thsel); 1386 return false; 1387 } 1388 1389 return true; 1390 } 1391 1392 static int 1393 nvmf_ctrlr_set_features_temperature_threshold(struct spdk_nvmf_request *req) 1394 { 1395 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1396 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1397 1398 SPDK_DEBUGLOG(nvmf, "Set Features - Temperature Threshold (cdw11 = 0x%0x)\n", cmd->cdw11); 1399 1400 if (!temp_threshold_opts_valid(&cmd->cdw11_bits.feat_temp_threshold)) { 1401 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 1402 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1403 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1404 } 1405 1406 /* TODO: no sensors implemented - ignore new values */ 1407 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1408 } 1409 1410 static int 1411 
nvmf_ctrlr_get_features_temperature_threshold(struct spdk_nvmf_request *req) 1412 { 1413 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1414 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1415 1416 SPDK_DEBUGLOG(nvmf, "Get Features - Temperature Threshold (cdw11 = 0x%0x)\n", cmd->cdw11); 1417 1418 if (!temp_threshold_opts_valid(&cmd->cdw11_bits.feat_temp_threshold)) { 1419 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 1420 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1421 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1422 } 1423 1424 /* TODO: no sensors implemented - return 0 for all thresholds */ 1425 rsp->cdw0 = 0; 1426 1427 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1428 } 1429 1430 static int 1431 nvmf_ctrlr_set_features_error_recovery(struct spdk_nvmf_request *req) 1432 { 1433 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1434 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1435 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1436 1437 SPDK_DEBUGLOG(nvmf, "Set Features - Error Recovery (cdw11 = 0x%0x)\n", cmd->cdw11); 1438 1439 if (cmd->cdw11_bits.feat_error_recovery.bits.dulbe) { 1440 /* 1441 * Host is not allowed to set this bit, since we don't advertise it in 1442 * Identify Namespace. 1443 */ 1444 SPDK_ERRLOG("Host set unsupported DULBE bit\n"); 1445 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 1446 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1447 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1448 } 1449 1450 ctrlr->feat.error_recovery.raw = cmd->cdw11; 1451 ctrlr->feat.error_recovery.bits.reserved = 0; 1452 1453 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1454 } 1455 1456 static int 1457 nvmf_ctrlr_set_features_volatile_write_cache(struct spdk_nvmf_request *req) 1458 { 1459 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1460 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1461 1462 SPDK_DEBUGLOG(nvmf, "Set Features - Volatile Write Cache (cdw11 = 0x%0x)\n", cmd->cdw11); 1463 1464 ctrlr->feat.volatile_write_cache.raw = cmd->cdw11; 1465 ctrlr->feat.volatile_write_cache.bits.reserved = 0; 1466 1467 SPDK_DEBUGLOG(nvmf, "Set Features - Volatile Write Cache %s\n", 1468 ctrlr->feat.volatile_write_cache.bits.wce ? 
"Enabled" : "Disabled"); 1469 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1470 } 1471 1472 static int 1473 nvmf_ctrlr_set_features_write_atomicity(struct spdk_nvmf_request *req) 1474 { 1475 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1476 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1477 1478 SPDK_DEBUGLOG(nvmf, "Set Features - Write Atomicity (cdw11 = 0x%0x)\n", cmd->cdw11); 1479 1480 ctrlr->feat.write_atomicity.raw = cmd->cdw11; 1481 ctrlr->feat.write_atomicity.bits.reserved = 0; 1482 1483 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1484 } 1485 1486 static int 1487 nvmf_ctrlr_set_features_host_identifier(struct spdk_nvmf_request *req) 1488 { 1489 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 1490 1491 SPDK_ERRLOG("Set Features - Host Identifier not allowed\n"); 1492 response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR; 1493 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1494 } 1495 1496 static int 1497 nvmf_ctrlr_get_features_host_identifier(struct spdk_nvmf_request *req) 1498 { 1499 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1500 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1501 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 1502 1503 SPDK_DEBUGLOG(nvmf, "Get Features - Host Identifier\n"); 1504 1505 if (!cmd->cdw11_bits.feat_host_identifier.bits.exhid) { 1506 /* NVMe over Fabrics requires EXHID=1 (128-bit/16-byte host ID) */ 1507 SPDK_ERRLOG("Get Features - Host Identifier with EXHID=0 not allowed\n"); 1508 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1509 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1510 } 1511 1512 if (req->data == NULL || req->length < sizeof(ctrlr->hostid)) { 1513 SPDK_ERRLOG("Invalid data buffer for Get Features - Host Identifier\n"); 1514 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1515 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1516 } 1517 1518 spdk_uuid_copy((struct spdk_uuid *)req->data, &ctrlr->hostid); 1519 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1520 } 1521 1522 static int 1523 nvmf_ctrlr_get_features_reservation_notification_mask(struct spdk_nvmf_request *req) 1524 { 1525 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1526 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1527 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1528 struct spdk_nvmf_ns *ns; 1529 1530 SPDK_DEBUGLOG(nvmf, "get Features - Reservation Notificaton Mask\n"); 1531 1532 if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) { 1533 SPDK_ERRLOG("get Features - Invalid Namespace ID\n"); 1534 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1535 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1536 } 1537 1538 ns = _nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid); 1539 if (ns == NULL) { 1540 SPDK_ERRLOG("Set Features - Invalid Namespace ID\n"); 1541 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1542 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1543 } 1544 rsp->cdw0 = ns->mask; 1545 1546 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1547 } 1548 1549 static int 1550 nvmf_ctrlr_set_features_reservation_notification_mask(struct spdk_nvmf_request *req) 1551 { 1552 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1553 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 1554 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1555 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1556 struct spdk_nvmf_ns *ns; 1557 1558 SPDK_DEBUGLOG(nvmf, "Set Features - Reservation Notificaton Mask\n"); 1559 1560 if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) { 1561 for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL; 1562 ns = 
spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) { 1563 ns->mask = cmd->cdw11; 1564 } 1565 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1566 } 1567 1568 ns = _nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid); 1569 if (ns == NULL) { 1570 SPDK_ERRLOG("Set Features - Invalid Namespace ID\n"); 1571 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1572 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1573 } 1574 ns->mask = cmd->cdw11; 1575 1576 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1577 } 1578 1579 static int 1580 nvmf_ctrlr_get_features_reservation_persistence(struct spdk_nvmf_request *req) 1581 { 1582 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1583 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1584 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 1585 struct spdk_nvmf_ns *ns; 1586 1587 SPDK_DEBUGLOG(nvmf, "Get Features - Reservation Persistence\n"); 1588 1589 ns = _nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid); 1590 /* NSID with SPDK_NVME_GLOBAL_NS_TAG (=0xffffffff) also included */ 1591 if (ns == NULL) { 1592 SPDK_ERRLOG("Get Features - Invalid Namespace ID\n"); 1593 response->status.sct = SPDK_NVME_SCT_GENERIC; 1594 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1595 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1596 } 1597 1598 response->cdw0 = ns->ptpl_activated; 1599 1600 response->status.sct = SPDK_NVME_SCT_GENERIC; 1601 response->status.sc = SPDK_NVME_SC_SUCCESS; 1602 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1603 } 1604 1605 static int 1606 nvmf_ctrlr_set_features_reservation_persistence(struct spdk_nvmf_request *req) 1607 { 1608 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1609 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1610 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 1611 struct spdk_nvmf_ns *ns; 1612 bool ptpl; 1613 1614 SPDK_DEBUGLOG(nvmf, "Set Features - Reservation Persistence\n"); 1615 1616 ns = _nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid); 1617 ptpl = cmd->cdw11_bits.feat_rsv_persistence.bits.ptpl; 1618 1619 if (cmd->nsid != SPDK_NVME_GLOBAL_NS_TAG && ns && ns->ptpl_file) { 1620 ns->ptpl_activated = ptpl; 1621 } else if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) { 1622 for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns && ns->ptpl_file; 1623 ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) { 1624 ns->ptpl_activated = ptpl; 1625 } 1626 } else { 1627 SPDK_ERRLOG("Set Features - Invalid Namespace ID or Reservation Configuration\n"); 1628 response->status.sct = SPDK_NVME_SCT_GENERIC; 1629 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1630 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1631 } 1632 1633 /* TODO: Feature not changeable for now */ 1634 response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 1635 response->status.sc = SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE; 1636 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1637 } 1638 1639 static int 1640 nvmf_ctrlr_set_features_host_behavior_support(struct spdk_nvmf_request *req) 1641 { 1642 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1643 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 1644 struct spdk_nvme_host_behavior *host_behavior; 1645 1646 SPDK_DEBUGLOG(nvmf, "Set Features - Host Behavior Support\n"); 1647 if (req->iovcnt != 1) { 1648 SPDK_ERRLOG("Host Behavior Support invalid iovcnt: %d\n", req->iovcnt); 1649 response->status.sct = SPDK_NVME_SCT_GENERIC; 1650 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1651 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1652 } 1653 if (req->iov[0].iov_len != sizeof(struct spdk_nvme_host_behavior)) { 
1654 SPDK_ERRLOG("Host Behavior Support invalid iov_len: %zd\n", req->iov[0].iov_len); 1655 response->status.sct = SPDK_NVME_SCT_GENERIC; 1656 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1657 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1658 } 1659 1660 host_behavior = (struct spdk_nvme_host_behavior *)req->iov[0].iov_base; 1661 if (host_behavior->acre == 0) { 1662 ctrlr->acre_enabled = false; 1663 } else if (host_behavior->acre == 1) { 1664 ctrlr->acre_enabled = true; 1665 } else { 1666 SPDK_ERRLOG("Host Behavior Support invalid acre: 0x%02x\n", host_behavior->acre); 1667 response->status.sct = SPDK_NVME_SCT_GENERIC; 1668 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1669 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1670 } 1671 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1672 } 1673 1674 static int 1675 nvmf_ctrlr_set_features_keep_alive_timer(struct spdk_nvmf_request *req) 1676 { 1677 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1678 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1679 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1680 1681 SPDK_DEBUGLOG(nvmf, "Set Features - Keep Alive Timer (%u ms)\n", cmd->cdw11); 1682 1683 /* 1684 * if attempts to disable keep alive by setting kato to 0h 1685 * a status value of keep alive invalid shall be returned 1686 */ 1687 if (cmd->cdw11_bits.feat_keep_alive_timer.bits.kato == 0) { 1688 rsp->status.sc = SPDK_NVME_SC_KEEP_ALIVE_INVALID; 1689 } else if (cmd->cdw11_bits.feat_keep_alive_timer.bits.kato < MIN_KEEP_ALIVE_TIMEOUT_IN_MS) { 1690 ctrlr->feat.keep_alive_timer.bits.kato = MIN_KEEP_ALIVE_TIMEOUT_IN_MS; 1691 } else { 1692 /* round up to milliseconds */ 1693 ctrlr->feat.keep_alive_timer.bits.kato = spdk_divide_round_up( 1694 cmd->cdw11_bits.feat_keep_alive_timer.bits.kato, 1695 KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS) * 1696 KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS; 1697 } 1698 1699 /* 1700 * if change the keep alive timeout value successfully 1701 * update the keep alive poller. 1702 */ 1703 if (cmd->cdw11_bits.feat_keep_alive_timer.bits.kato != 0) { 1704 if (ctrlr->keep_alive_poller != NULL) { 1705 spdk_poller_unregister(&ctrlr->keep_alive_poller); 1706 } 1707 ctrlr->keep_alive_poller = SPDK_POLLER_REGISTER(nvmf_ctrlr_keep_alive_poll, ctrlr, 1708 ctrlr->feat.keep_alive_timer.bits.kato * 1000); 1709 } 1710 1711 SPDK_DEBUGLOG(nvmf, "Set Features - Keep Alive Timer set to %u ms\n", 1712 ctrlr->feat.keep_alive_timer.bits.kato); 1713 1714 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1715 } 1716 1717 static int 1718 nvmf_ctrlr_set_features_number_of_queues(struct spdk_nvmf_request *req) 1719 { 1720 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1721 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1722 uint32_t count; 1723 1724 SPDK_DEBUGLOG(nvmf, "Set Features - Number of Queues, cdw11 0x%x\n", 1725 req->cmd->nvme_cmd.cdw11); 1726 1727 count = spdk_bit_array_count_set(ctrlr->qpair_mask); 1728 /* verify that the controller is ready to process commands */ 1729 if (count > 1) { 1730 SPDK_DEBUGLOG(nvmf, "Queue pairs already active!\n"); 1731 rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR; 1732 } else { 1733 /* 1734 * Ignore the value requested by the host - 1735 * always return the pre-configured value based on max_qpairs_allowed. 
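		 * As an illustration of the encoding: the completion dword carries
		 * 0-based allocations, NSQA in bits 15:0 and NCQA in bits 31:16, so a
		 * controller pre-configured for 128 I/O submission/completion queue
		 * pairs would report 0x007F007F here.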
1736 */ 1737 rsp->cdw0 = ctrlr->feat.number_of_queues.raw; 1738 } 1739 1740 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1741 } 1742 1743 static int 1744 nvmf_ctrlr_set_features_async_event_configuration(struct spdk_nvmf_request *req) 1745 { 1746 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1747 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1748 1749 SPDK_DEBUGLOG(nvmf, "Set Features - Async Event Configuration, cdw11 0x%08x\n", 1750 cmd->cdw11); 1751 ctrlr->feat.async_event_configuration.raw = cmd->cdw11; 1752 ctrlr->feat.async_event_configuration.bits.reserved1 = 0; 1753 ctrlr->feat.async_event_configuration.bits.reserved2 = 0; 1754 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1755 } 1756 1757 static int 1758 nvmf_ctrlr_async_event_request(struct spdk_nvmf_request *req) 1759 { 1760 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1761 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1762 struct spdk_nvmf_subsystem_poll_group *sgroup; 1763 struct spdk_nvmf_async_event_completion *pending_event; 1764 1765 SPDK_DEBUGLOG(nvmf, "Async Event Request\n"); 1766 1767 /* AER cmd is an exception */ 1768 sgroup = &req->qpair->group->sgroups[ctrlr->subsys->id]; 1769 assert(sgroup != NULL); 1770 sgroup->mgmt_io_outstanding--; 1771 1772 /* Four asynchronous events are supported for now */ 1773 if (ctrlr->nr_aer_reqs >= NVMF_MAX_ASYNC_EVENTS) { 1774 SPDK_DEBUGLOG(nvmf, "AERL exceeded\n"); 1775 rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 1776 rsp->status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED; 1777 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1778 } 1779 1780 if (!STAILQ_EMPTY(&ctrlr->async_events)) { 1781 pending_event = STAILQ_FIRST(&ctrlr->async_events); 1782 rsp->cdw0 = pending_event->event.raw; 1783 STAILQ_REMOVE(&ctrlr->async_events, pending_event, spdk_nvmf_async_event_completion, link); 1784 free(pending_event); 1785 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1786 } 1787 1788 ctrlr->aer_req[ctrlr->nr_aer_reqs++] = req; 1789 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 1790 } 1791 1792 struct copy_iovs_ctx { 1793 struct iovec *iovs; 1794 int iovcnt; 1795 int cur_iov_idx; 1796 size_t cur_iov_offset; 1797 }; 1798 1799 static void 1800 _init_copy_iovs_ctx(struct copy_iovs_ctx *copy_ctx, struct iovec *iovs, int iovcnt) 1801 { 1802 int iov_idx = 0; 1803 struct iovec *iov; 1804 1805 copy_ctx->iovs = iovs; 1806 copy_ctx->iovcnt = iovcnt; 1807 copy_ctx->cur_iov_idx = 0; 1808 copy_ctx->cur_iov_offset = 0; 1809 1810 while (iov_idx < copy_ctx->iovcnt) { 1811 iov = ©_ctx->iovs[iov_idx]; 1812 memset(iov->iov_base, 0, iov->iov_len); 1813 iov_idx++; 1814 } 1815 } 1816 1817 static size_t 1818 _copy_buf_to_iovs(struct copy_iovs_ctx *copy_ctx, const void *buf, size_t buf_len) 1819 { 1820 size_t len, iov_remain_len, copied_len = 0; 1821 struct iovec *iov; 1822 1823 if (buf_len == 0) { 1824 return 0; 1825 } 1826 1827 while (copy_ctx->cur_iov_idx < copy_ctx->iovcnt) { 1828 iov = ©_ctx->iovs[copy_ctx->cur_iov_idx]; 1829 iov_remain_len = iov->iov_len - copy_ctx->cur_iov_offset; 1830 if (iov_remain_len == 0) { 1831 copy_ctx->cur_iov_idx++; 1832 copy_ctx->cur_iov_offset = 0; 1833 continue; 1834 } 1835 1836 len = spdk_min(iov_remain_len, buf_len - copied_len); 1837 memcpy((char *)iov->iov_base + copy_ctx->cur_iov_offset, 1838 (const char *)buf + copied_len, 1839 len); 1840 copied_len += len; 1841 copy_ctx->cur_iov_offset += len; 1842 1843 if (buf_len == copied_len) { 1844 return copied_len; 1845 } 1846 } 1847 1848 return copied_len; 1849 } 1850 1851 static void 1852 
nvmf_get_firmware_slot_log_page(struct iovec *iovs, int iovcnt, uint64_t offset, uint32_t length)
{
	struct spdk_nvme_firmware_page fw_page;
	size_t copy_len;
	struct copy_iovs_ctx copy_ctx;

	_init_copy_iovs_ctx(&copy_ctx, iovs, iovcnt);

	memset(&fw_page, 0, sizeof(fw_page));
	fw_page.afi.active_slot = 1;
	fw_page.afi.next_reset_slot = 0;
	spdk_strcpy_pad(fw_page.revision[0], FW_VERSION, sizeof(fw_page.revision[0]), ' ');

	if (offset < sizeof(fw_page)) {
		copy_len = spdk_min(sizeof(fw_page) - offset, length);
		if (copy_len > 0) {
			_copy_buf_to_iovs(&copy_ctx, (const char *)&fw_page + offset, copy_len);
		}
	}
}

/*
 * Asynchronous Event Mask Bit
 */
enum spdk_nvme_async_event_mask_bit {
	/* Mask Namespace Change Notification */
	SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGE_MASK_BIT = 0,
	/* Mask Asymmetric Namespace Access Change Notification */
	SPDK_NVME_ASYNC_EVENT_ANA_CHANGE_MASK_BIT = 1,
	/* Mask Discovery Log Change Notification */
	SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE_MASK_BIT = 2,
	/* Mask Reservation Log Page Available Notification */
	SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL_MASK_BIT = 3,

	/* 4 - 63 Reserved */
};

static inline void
nvmf_ctrlr_unmask_aen(struct spdk_nvmf_ctrlr *ctrlr,
		      enum spdk_nvme_async_event_mask_bit mask)
{
	ctrlr->notice_aen_mask &= ~(1 << mask);
}

static inline bool
nvmf_ctrlr_mask_aen(struct spdk_nvmf_ctrlr *ctrlr,
		    enum spdk_nvme_async_event_mask_bit mask)
{
	if (ctrlr->notice_aen_mask & (1 << mask)) {
		return false;
	} else {
		ctrlr->notice_aen_mask |= (1 << mask);
		return true;
	}
}

#define SPDK_NVMF_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + \
				 sizeof(uint32_t))
static void
nvmf_get_ana_log_page(struct spdk_nvmf_ctrlr *ctrlr, struct iovec *iovs, int iovcnt,
		      uint64_t offset, uint32_t length, uint32_t rae)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[SPDK_NVMF_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	size_t copy_len, copied_len;
	uint32_t num_ns = 0;
	struct spdk_nvmf_ns *ns;
	struct copy_iovs_ctx copy_ctx;

	_init_copy_iovs_ctx(&copy_ctx, iovs, iovcnt);

	if (length == 0) {
		return;
	}

	if (offset >= sizeof(ana_hdr)) {
		offset -= sizeof(ana_hdr);
	} else {
		for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns != NULL;
		     ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) {
			num_ns++;
		}

		memset(&ana_hdr, 0, sizeof(ana_hdr));

		ana_hdr.num_ana_group_desc = num_ns;
		/* TODO: Support Change Count. */
		ana_hdr.change_count = 0;

		copy_len = spdk_min(sizeof(ana_hdr) - offset, length);
		copied_len = _copy_buf_to_iovs(&copy_ctx, (const char *)&ana_hdr + offset, copy_len);
		assert(copied_len == copy_len);
		length -= copied_len;
		offset = 0;
	}

	if (length == 0) {
		return;
	}

	ana_desc = (void *)_ana_desc;

	for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) {
		if (offset >= SPDK_NVMF_ANA_DESC_SIZE) {
			offset -= SPDK_NVMF_ANA_DESC_SIZE;
			continue;
		}

		memset(ana_desc, 0, SPDK_NVMF_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->nsid;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ctrlr->listener->ana_state;
		ana_desc->nsid[0] = ns->nsid;
		/* TODO: Support Change Count. */
		ana_desc->change_count = 0;

		copy_len = spdk_min(SPDK_NVMF_ANA_DESC_SIZE - offset, length);
		copied_len = _copy_buf_to_iovs(&copy_ctx, (const char *)ana_desc + offset, copy_len);
		assert(copied_len == copy_len);
		length -= copied_len;
		offset = 0;

		if (length == 0) {
			goto done;
		}
	}

done:
	if (!rae) {
		nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ANA_CHANGE_MASK_BIT);
	}
}

void
nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
{
	uint16_t max_changes = SPDK_COUNTOF(ctrlr->changed_ns_list.ns_list);
	uint16_t i;
	bool found = false;

	for (i = 0; i < ctrlr->changed_ns_list_count; i++) {
		if (ctrlr->changed_ns_list.ns_list[i] == nsid) {
			/* nsid is already in the list */
			found = true;
			break;
		}
	}

	if (!found) {
		if (ctrlr->changed_ns_list_count == max_changes) {
			/* Out of space - set first entry to FFFFFFFFh and zero-fill the rest. */
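			/* Sizing note (for reference): ns_list mirrors the 4 KiB Changed
			 * Namespace List log page, i.e. up to 1024 32-bit NSIDs, so overflow
			 * is reported with the spec's single FFFFFFFFh entry as done below.
			 */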
			ctrlr->changed_ns_list.ns_list[0] = 0xFFFFFFFFu;
			for (i = 1; i < max_changes; i++) {
				ctrlr->changed_ns_list.ns_list[i] = 0;
			}
		} else {
			ctrlr->changed_ns_list.ns_list[ctrlr->changed_ns_list_count++] = nsid;
		}
	}
}

static void
nvmf_get_changed_ns_list_log_page(struct spdk_nvmf_ctrlr *ctrlr,
				  struct iovec *iovs, int iovcnt, uint64_t offset, uint32_t length, uint32_t rae)
{
	size_t copy_length;
	struct copy_iovs_ctx copy_ctx;

	_init_copy_iovs_ctx(&copy_ctx, iovs, iovcnt);

	if (offset < sizeof(ctrlr->changed_ns_list)) {
		copy_length = spdk_min(length, sizeof(ctrlr->changed_ns_list) - offset);
		if (copy_length) {
			_copy_buf_to_iovs(&copy_ctx, (char *)&ctrlr->changed_ns_list + offset, copy_length);
		}
	}

	/* Clear log page each time it is read */
	ctrlr->changed_ns_list_count = 0;
	memset(&ctrlr->changed_ns_list, 0, sizeof(ctrlr->changed_ns_list));

	if (!rae) {
		nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGE_MASK_BIT);
	}
}

/* The structure can be modified if we provide support for other commands in future */
static const struct spdk_nvme_cmds_and_effect_log_page g_cmds_and_effect_log_page = {
	.admin_cmds_supported = {
		/* CSUPP, LBCC, NCC, NIC, CCC, CSE */
		/* Get Log Page */
		[SPDK_NVME_OPC_GET_LOG_PAGE] = {1, 0, 0, 0, 0, 0, 0, 0},
		/* Identify */
		[SPDK_NVME_OPC_IDENTIFY] = {1, 0, 0, 0, 0, 0, 0, 0},
		/* Abort */
		[SPDK_NVME_OPC_ABORT] = {1, 0, 0, 0, 0, 0, 0, 0},
		/* Set Features */
		[SPDK_NVME_OPC_SET_FEATURES] = {1, 0, 0, 0, 0, 0, 0, 0},
		/* Get Features */
		[SPDK_NVME_OPC_GET_FEATURES] = {1, 0, 0, 0, 0, 0, 0, 0},
		/* Async Event Request */
		[SPDK_NVME_OPC_ASYNC_EVENT_REQUEST] = {1, 0, 0, 0, 0, 0, 0, 0},
		/* Keep Alive */
		[SPDK_NVME_OPC_KEEP_ALIVE] = {1, 0, 0, 0, 0, 0, 0, 0},
	},
	.io_cmds_supported = {
		/* FLUSH */
		[SPDK_NVME_OPC_FLUSH] = {1, 1, 0, 0, 0, 0, 0, 0},
		/* WRITE */
		[SPDK_NVME_OPC_WRITE] = {1, 1, 0, 0, 0, 0, 0, 0},
		/* READ */
		[SPDK_NVME_OPC_READ] = {1, 0, 0, 0, 0, 0, 0, 0},
		/* WRITE ZEROES */
		[SPDK_NVME_OPC_WRITE_ZEROES] = {1, 1, 0, 0, 0, 0, 0, 0},
		/* DATASET MANAGEMENT */
		[SPDK_NVME_OPC_DATASET_MANAGEMENT] = {1, 1, 0, 0, 0, 0, 0, 0},
		/* COMPARE */
		[SPDK_NVME_OPC_COMPARE] = {1, 0, 0, 0, 0, 0, 0, 0},
	},
};

static void
nvmf_get_cmds_and_effects_log_page(struct iovec *iovs, int iovcnt,
				   uint64_t offset, uint32_t length)
{
	uint32_t page_size = sizeof(struct spdk_nvme_cmds_and_effect_log_page);
	size_t copy_len = 0;
	struct copy_iovs_ctx copy_ctx;

	_init_copy_iovs_ctx(&copy_ctx, iovs, iovcnt);

	if (offset < page_size) {
		copy_len = spdk_min(page_size - offset, length);
		_copy_buf_to_iovs(&copy_ctx, (char *)(&g_cmds_and_effect_log_page) + offset, copy_len);
	}
}

static void
nvmf_get_reservation_notification_log_page(struct spdk_nvmf_ctrlr *ctrlr,
		struct iovec *iovs, int iovcnt, uint64_t offset, uint32_t length, uint32_t rae)
{
	uint32_t unit_log_len, avail_log_len, next_pos, copy_len;
	struct spdk_nvmf_reservation_log *log, *log_tmp;
	struct copy_iovs_ctx copy_ctx;

	_init_copy_iovs_ctx(&copy_ctx, iovs, iovcnt);

	unit_log_len = sizeof(struct spdk_nvme_reservation_notification_log);
	/* No available log, return zeroed log pages */
	if (!ctrlr->num_avail_log_pages) {
		return;
	}

	avail_log_len = ctrlr->num_avail_log_pages * unit_log_len;
	if (offset >= avail_log_len) {
		return;
	}

	next_pos = 0;
	TAILQ_FOREACH_SAFE(log, &ctrlr->log_head, link, log_tmp) {
		TAILQ_REMOVE(&ctrlr->log_head, log, link);
		ctrlr->num_avail_log_pages--;

		next_pos += unit_log_len;
		if (next_pos > offset) {
			copy_len = spdk_min(next_pos - offset, length);
			_copy_buf_to_iovs(&copy_ctx, &log->log, copy_len);
			length -= copy_len;
			offset += copy_len;
		}
		free(log);

		if (length == 0) {
			break;
		}
	}

	if (!rae) {
		nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL_MASK_BIT);
	}
	return;
}

static int
nvmf_ctrlr_get_log_page(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	uint64_t offset, len;
	uint32_t rae, numdl, numdu;
	uint8_t lid;

	if (req->data == NULL) {
		SPDK_ERRLOG("get log command with no buffer\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	offset = (uint64_t)cmd->cdw12 | ((uint64_t)cmd->cdw13 << 32);
	if (offset & 3) {
		SPDK_ERRLOG("Invalid log page offset 0x%" PRIx64 "\n", offset);
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rae = cmd->cdw10_bits.get_log_page.rae;
	numdl = cmd->cdw10_bits.get_log_page.numdl;
	numdu = cmd->cdw11_bits.get_log_page.numdu;
	len = ((numdu << 16) + numdl + (uint64_t)1) * 4;
	if (len > req->length) {
		SPDK_ERRLOG("Get log page: len (%" PRIu64 ") > buf size (%u)\n",
			    len, req->length);
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	lid = cmd->cdw10_bits.get_log_page.lid;
	SPDK_DEBUGLOG(nvmf, "Get log page: LID=0x%02X offset=0x%" PRIx64 " len=0x%" PRIx64 " rae=%u\n",
		      lid, offset, len, rae);

	if (subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
		switch (lid) {
		case SPDK_NVME_LOG_DISCOVERY:
			nvmf_get_discovery_log_page(subsystem->tgt, ctrlr->hostnqn, req->iov, req->iovcnt, offset,
						    len);
			if (!rae) {
				nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE_MASK_BIT);
			}
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		default:
			goto invalid_log_page;
		}
	} else {
		switch (lid) {
		case SPDK_NVME_LOG_ERROR:
		case SPDK_NVME_LOG_HEALTH_INFORMATION:
			/* TODO: actually fill out log page data */
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		case SPDK_NVME_LOG_FIRMWARE_SLOT:
			nvmf_get_firmware_slot_log_page(req->iov, req->iovcnt, offset, len);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		case SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS:
			if (subsystem->flags.ana_reporting) {
				nvmf_get_ana_log_page(ctrlr, req->iov, req->iovcnt, offset, len, rae);
				return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
			} else {
				goto invalid_log_page;
			}
		case
SPDK_NVME_LOG_COMMAND_EFFECTS_LOG: 2209 nvmf_get_cmds_and_effects_log_page(req->iov, req->iovcnt, offset, len); 2210 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2211 case SPDK_NVME_LOG_CHANGED_NS_LIST: 2212 nvmf_get_changed_ns_list_log_page(ctrlr, req->iov, req->iovcnt, offset, len, rae); 2213 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2214 case SPDK_NVME_LOG_RESERVATION_NOTIFICATION: 2215 nvmf_get_reservation_notification_log_page(ctrlr, req->iov, req->iovcnt, offset, len, rae); 2216 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2217 default: 2218 goto invalid_log_page; 2219 } 2220 } 2221 2222 invalid_log_page: 2223 SPDK_DEBUGLOG(nvmf, "Unsupported Get Log Page 0x%02X\n", lid); 2224 response->status.sct = SPDK_NVME_SCT_GENERIC; 2225 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2226 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2227 } 2228 2229 int 2230 spdk_nvmf_ctrlr_identify_ns(struct spdk_nvmf_ctrlr *ctrlr, 2231 struct spdk_nvme_cmd *cmd, 2232 struct spdk_nvme_cpl *rsp, 2233 struct spdk_nvme_ns_data *nsdata) 2234 { 2235 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 2236 struct spdk_nvmf_ns *ns; 2237 uint32_t max_num_blocks; 2238 2239 if (cmd->nsid == 0 || cmd->nsid > subsystem->max_nsid) { 2240 SPDK_ERRLOG("Identify Namespace for invalid NSID %u\n", cmd->nsid); 2241 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 2242 rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT; 2243 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2244 } 2245 2246 ns = _nvmf_subsystem_get_ns(subsystem, cmd->nsid); 2247 if (ns == NULL || ns->bdev == NULL) { 2248 /* 2249 * Inactive namespaces should return a zero filled data structure. 2250 * The data buffer is already zeroed by nvmf_ctrlr_process_admin_cmd(), 2251 * so we can just return early here. 2252 */ 2253 SPDK_DEBUGLOG(nvmf, "Identify Namespace for inactive NSID %u\n", cmd->nsid); 2254 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 2255 rsp->status.sc = SPDK_NVME_SC_SUCCESS; 2256 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2257 } 2258 2259 nvmf_bdev_ctrlr_identify_ns(ns, nsdata, ctrlr->dif_insert_or_strip); 2260 2261 /* Due to bug in the Linux kernel NVMe driver we have to set noiob no larger than mdts */ 2262 max_num_blocks = ctrlr->admin_qpair->transport->opts.max_io_size / 2263 (1U << nsdata->lbaf[nsdata->flbas.format].lbads); 2264 if (nsdata->noiob > max_num_blocks) { 2265 nsdata->noiob = max_num_blocks; 2266 } 2267 2268 if (subsystem->flags.ana_reporting) { 2269 /* ANA group ID matches NSID. 
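		 * Each active namespace therefore forms its own single-member ANA group;
		 * this is also why ANAGRPMAX and NANAGRPID are reported as max_nsid in
		 * Identify Controller further below.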
*/ 2270 nsdata->anagrpid = ns->nsid; 2271 2272 if (ctrlr->listener->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE || 2273 ctrlr->listener->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE) { 2274 nsdata->nuse = 0; 2275 } 2276 } 2277 2278 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2279 } 2280 2281 static void 2282 nvmf_ctrlr_populate_oacs(struct spdk_nvmf_ctrlr *ctrlr, 2283 struct spdk_nvme_ctrlr_data *cdata) 2284 { 2285 cdata->oacs.virtualization_management = 2286 g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_VIRTUALIZATION_MANAGEMENT].hdlr != NULL; 2287 cdata->oacs.nvme_mi = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NVME_MI_SEND].hdlr != NULL 2288 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NVME_MI_RECEIVE].hdlr != NULL; 2289 cdata->oacs.directives = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_DIRECTIVE_SEND].hdlr != NULL 2290 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_DIRECTIVE_RECEIVE].hdlr != NULL; 2291 cdata->oacs.device_self_test = 2292 g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_DEVICE_SELF_TEST].hdlr != NULL; 2293 cdata->oacs.ns_manage = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NS_MANAGEMENT].hdlr != NULL 2294 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NS_ATTACHMENT].hdlr != NULL; 2295 cdata->oacs.firmware = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD].hdlr != 2296 NULL 2297 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_FIRMWARE_COMMIT].hdlr != NULL; 2298 cdata->oacs.format = 2299 g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_FORMAT_NVM].hdlr != NULL; 2300 cdata->oacs.security = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_SECURITY_SEND].hdlr != NULL 2301 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_SECURITY_RECEIVE].hdlr != NULL; 2302 cdata->oacs.get_lba_status = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_GET_LBA_STATUS].hdlr != 2303 NULL; 2304 } 2305 2306 int 2307 spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_ctrlr_data *cdata) 2308 { 2309 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 2310 struct spdk_nvmf_transport *transport = ctrlr->admin_qpair->transport; 2311 2312 /* 2313 * Common fields for discovery and NVM subsystems 2314 */ 2315 spdk_strcpy_pad(cdata->fr, FW_VERSION, sizeof(cdata->fr), ' '); 2316 assert((transport->opts.max_io_size % 4096) == 0); 2317 cdata->mdts = spdk_u32log2(transport->opts.max_io_size / 4096); 2318 cdata->cntlid = ctrlr->cntlid; 2319 cdata->ver = ctrlr->vcprop.vs; 2320 cdata->aerl = NVMF_MAX_ASYNC_EVENTS - 1; 2321 cdata->lpa.edlp = 1; 2322 cdata->elpe = 127; 2323 cdata->maxcmd = transport->opts.max_queue_depth; 2324 cdata->sgls = ctrlr->cdata.sgls; 2325 cdata->fuses.compare_and_write = 1; 2326 cdata->acwu = 1; 2327 if (subsystem->flags.ana_reporting) { 2328 cdata->mnan = subsystem->max_nsid; 2329 } 2330 spdk_strcpy_pad(cdata->subnqn, subsystem->subnqn, sizeof(cdata->subnqn), '\0'); 2331 2332 SPDK_DEBUGLOG(nvmf, "ctrlr data: maxcmd 0x%x\n", cdata->maxcmd); 2333 SPDK_DEBUGLOG(nvmf, "sgls data: 0x%x\n", from_le32(&cdata->sgls)); 2334 2335 2336 if (subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) { 2337 /* 2338 * NVM Discovery subsystem fields 2339 */ 2340 cdata->oaes.discovery_log_change_notices = 1; 2341 } else { 2342 /* 2343 * NVM subsystem fields (reserved for discovery subsystems) 2344 */ 2345 spdk_strcpy_pad(cdata->mn, spdk_nvmf_subsystem_get_mn(subsystem), sizeof(cdata->mn), ' '); 2346 spdk_strcpy_pad(cdata->sn, spdk_nvmf_subsystem_get_sn(subsystem), sizeof(cdata->sn), ' '); 2347 cdata->kas = ctrlr->cdata.kas; 2348 2349 cdata->rab = 6; 2350 
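		/* RAB is encoded as a power of two, so the value 6 above advertises a
		 * recommended arbitration burst of 64 commands. The CMIC bits that follow
		 * declare that the subsystem may be reachable through multiple ports and
		 * may expose more than one controller.
		 */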
cdata->cmic.multi_port = 1; 2351 cdata->cmic.multi_ctrlr = 1; 2352 if (subsystem->flags.ana_reporting) { 2353 /* Asymmetric Namespace Access Reporting is supported. */ 2354 cdata->cmic.ana_reporting = 1; 2355 } 2356 cdata->oaes.ns_attribute_notices = 1; 2357 if (subsystem->flags.ana_reporting) { 2358 cdata->oaes.ana_change_notices = 1; 2359 } 2360 cdata->ctratt.host_id_exhid_supported = 1; 2361 /* TODO: Concurrent execution of multiple abort commands. */ 2362 cdata->acl = 0; 2363 cdata->frmw.slot1_ro = 1; 2364 cdata->frmw.num_slots = 1; 2365 2366 cdata->lpa.celp = 1; /* Command Effects log page supported */ 2367 2368 cdata->sqes.min = 6; 2369 cdata->sqes.max = 6; 2370 cdata->cqes.min = 4; 2371 cdata->cqes.max = 4; 2372 cdata->nn = subsystem->max_nsid; 2373 cdata->vwc.present = 1; 2374 cdata->vwc.flush_broadcast = SPDK_NVME_FLUSH_BROADCAST_NOT_SUPPORTED; 2375 2376 cdata->nvmf_specific = ctrlr->cdata.nvmf_specific; 2377 2378 cdata->oncs.dsm = nvmf_ctrlr_dsm_supported(ctrlr); 2379 cdata->oncs.write_zeroes = nvmf_ctrlr_write_zeroes_supported(ctrlr); 2380 cdata->oncs.reservations = 1; 2381 if (subsystem->flags.ana_reporting) { 2382 cdata->anatt = ANA_TRANSITION_TIME_IN_SEC; 2383 /* ANA Change state is not used, and ANA Persistent Loss state 2384 * is not supported for now. 2385 */ 2386 cdata->anacap.ana_optimized_state = 1; 2387 cdata->anacap.ana_non_optimized_state = 1; 2388 cdata->anacap.ana_inaccessible_state = 1; 2389 /* ANAGRPID does not change while namespace is attached to controller */ 2390 cdata->anacap.no_change_anagrpid = 1; 2391 cdata->anagrpmax = subsystem->max_nsid; 2392 cdata->nanagrpid = subsystem->max_nsid; 2393 } 2394 2395 nvmf_ctrlr_populate_oacs(ctrlr, cdata); 2396 2397 assert(subsystem->tgt != NULL); 2398 cdata->crdt[0] = subsystem->tgt->crdt[0]; 2399 cdata->crdt[1] = subsystem->tgt->crdt[1]; 2400 cdata->crdt[2] = subsystem->tgt->crdt[2]; 2401 2402 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: ioccsz 0x%x\n", 2403 cdata->nvmf_specific.ioccsz); 2404 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: iorcsz 0x%x\n", 2405 cdata->nvmf_specific.iorcsz); 2406 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: icdoff 0x%x\n", 2407 cdata->nvmf_specific.icdoff); 2408 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: ctrattr 0x%x\n", 2409 *(uint8_t *)&cdata->nvmf_specific.ctrattr); 2410 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: msdbd 0x%x\n", 2411 cdata->nvmf_specific.msdbd); 2412 } 2413 2414 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2415 } 2416 2417 static int 2418 nvmf_ctrlr_identify_active_ns_list(struct spdk_nvmf_subsystem *subsystem, 2419 struct spdk_nvme_cmd *cmd, 2420 struct spdk_nvme_cpl *rsp, 2421 struct spdk_nvme_ns_list *ns_list) 2422 { 2423 struct spdk_nvmf_ns *ns; 2424 uint32_t count = 0; 2425 2426 if (cmd->nsid >= 0xfffffffeUL) { 2427 SPDK_ERRLOG("Identify Active Namespace List with invalid NSID %u\n", cmd->nsid); 2428 rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT; 2429 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2430 } 2431 2432 for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL; 2433 ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) { 2434 if (ns->opts.nsid <= cmd->nsid) { 2435 continue; 2436 } 2437 2438 ns_list->ns_list[count++] = ns->opts.nsid; 2439 if (count == SPDK_COUNTOF(ns_list->ns_list)) { 2440 break; 2441 } 2442 } 2443 2444 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2445 } 2446 2447 static void 2448 _add_ns_id_desc(void **buf_ptr, size_t *buf_remain, 2449 enum spdk_nvme_nidt type, 2450 const void *data, size_t data_size) 2451 { 2452 struct spdk_nvme_ns_id_desc *desc; 2453 
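	/* For reference: sizeof(*desc) is the 4-byte descriptor header (NIDT, NIDL
	 * plus two reserved bytes), so, for example, a UUID descriptor (NIDT = 3,
	 * NIDL = 16) occupies 20 bytes of the caller's buffer.
	 */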
size_t desc_size = sizeof(*desc) + data_size; 2454 2455 /* 2456 * These should never fail in practice, since all valid NS ID descriptors 2457 * should be defined so that they fit in the available 4096-byte buffer. 2458 */ 2459 assert(data_size > 0); 2460 assert(data_size <= UINT8_MAX); 2461 assert(desc_size < *buf_remain); 2462 if (data_size == 0 || data_size > UINT8_MAX || desc_size > *buf_remain) { 2463 return; 2464 } 2465 2466 desc = *buf_ptr; 2467 desc->nidt = type; 2468 desc->nidl = data_size; 2469 memcpy(desc->nid, data, data_size); 2470 2471 *buf_ptr += desc_size; 2472 *buf_remain -= desc_size; 2473 } 2474 2475 static int 2476 nvmf_ctrlr_identify_ns_id_descriptor_list( 2477 struct spdk_nvmf_subsystem *subsystem, 2478 struct spdk_nvme_cmd *cmd, 2479 struct spdk_nvme_cpl *rsp, 2480 void *id_desc_list, size_t id_desc_list_size) 2481 { 2482 struct spdk_nvmf_ns *ns; 2483 size_t buf_remain = id_desc_list_size; 2484 void *buf_ptr = id_desc_list; 2485 2486 ns = _nvmf_subsystem_get_ns(subsystem, cmd->nsid); 2487 if (ns == NULL || ns->bdev == NULL) { 2488 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 2489 rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT; 2490 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2491 } 2492 2493 #define ADD_ID_DESC(type, data, size) \ 2494 do { \ 2495 if (!spdk_mem_all_zero(data, size)) { \ 2496 _add_ns_id_desc(&buf_ptr, &buf_remain, type, data, size); \ 2497 } \ 2498 } while (0) 2499 2500 ADD_ID_DESC(SPDK_NVME_NIDT_EUI64, ns->opts.eui64, sizeof(ns->opts.eui64)); 2501 ADD_ID_DESC(SPDK_NVME_NIDT_NGUID, ns->opts.nguid, sizeof(ns->opts.nguid)); 2502 ADD_ID_DESC(SPDK_NVME_NIDT_UUID, &ns->opts.uuid, sizeof(ns->opts.uuid)); 2503 2504 /* 2505 * The list is automatically 0-terminated because controller to host buffers in 2506 * admin commands always get zeroed in nvmf_ctrlr_process_admin_cmd(). 
2507 */ 2508 2509 #undef ADD_ID_DESC 2510 2511 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2512 } 2513 2514 static int 2515 nvmf_ctrlr_identify(struct spdk_nvmf_request *req) 2516 { 2517 uint8_t cns; 2518 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2519 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2520 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 2521 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 2522 2523 if (req->data == NULL || req->length < 4096) { 2524 SPDK_ERRLOG("identify command with invalid buffer\n"); 2525 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 2526 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2527 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2528 } 2529 2530 cns = cmd->cdw10_bits.identify.cns; 2531 2532 if (subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY && 2533 cns != SPDK_NVME_IDENTIFY_CTRLR) { 2534 /* Discovery controllers only support Identify Controller */ 2535 goto invalid_cns; 2536 } 2537 2538 switch (cns) { 2539 case SPDK_NVME_IDENTIFY_NS: 2540 return spdk_nvmf_ctrlr_identify_ns(ctrlr, cmd, rsp, req->data); 2541 case SPDK_NVME_IDENTIFY_CTRLR: 2542 return spdk_nvmf_ctrlr_identify_ctrlr(ctrlr, req->data); 2543 case SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST: 2544 return nvmf_ctrlr_identify_active_ns_list(subsystem, cmd, rsp, req->data); 2545 case SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST: 2546 return nvmf_ctrlr_identify_ns_id_descriptor_list(subsystem, cmd, rsp, req->data, req->length); 2547 default: 2548 goto invalid_cns; 2549 } 2550 2551 invalid_cns: 2552 SPDK_ERRLOG("Identify command with unsupported CNS 0x%02x\n", cns); 2553 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 2554 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2555 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2556 } 2557 2558 static bool 2559 nvmf_qpair_abort_aer(struct spdk_nvmf_qpair *qpair, uint16_t cid) 2560 { 2561 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; 2562 struct spdk_nvmf_request *req; 2563 int i; 2564 2565 if (!nvmf_qpair_is_admin_queue(qpair)) { 2566 return false; 2567 } 2568 2569 for (i = 0; i < ctrlr->nr_aer_reqs; i++) { 2570 if (ctrlr->aer_req[i]->cmd->nvme_cmd.cid == cid) { 2571 SPDK_DEBUGLOG(nvmf, "Aborting AER request\n"); 2572 req = ctrlr->aer_req[i]; 2573 ctrlr->aer_req[i] = NULL; 2574 ctrlr->nr_aer_reqs--; 2575 2576 /* Move the last req to the aborting position for making aer_reqs 2577 * in continuous 2578 */ 2579 if (i < ctrlr->nr_aer_reqs) { 2580 ctrlr->aer_req[i] = ctrlr->aer_req[ctrlr->nr_aer_reqs]; 2581 ctrlr->aer_req[ctrlr->nr_aer_reqs] = NULL; 2582 } 2583 2584 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2585 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 2586 _nvmf_request_complete(req); 2587 return true; 2588 } 2589 } 2590 2591 return false; 2592 } 2593 2594 static void 2595 nvmf_qpair_abort_request(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req) 2596 { 2597 uint16_t cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid; 2598 2599 if (nvmf_qpair_abort_aer(qpair, cid)) { 2600 SPDK_DEBUGLOG(nvmf, "abort ctrlr=%p sqid=%u cid=%u successful\n", 2601 qpair->ctrlr, qpair->qid, cid); 2602 req->rsp->nvme_cpl.cdw0 &= ~1U; /* Command successfully aborted */ 2603 2604 spdk_nvmf_request_complete(req); 2605 return; 2606 } 2607 2608 nvmf_transport_qpair_abort_request(qpair, req); 2609 } 2610 2611 static void 2612 nvmf_ctrlr_abort_done(struct spdk_io_channel_iter *i, int status) 2613 { 2614 struct spdk_nvmf_request *req = spdk_io_channel_iter_get_ctx(i); 2615 2616 if (status == 0) { 2617 /* There was no qpair whose ID matches SQID of the 
abort command. 2618 * Hence call _nvmf_request_complete() here. 2619 */ 2620 _nvmf_request_complete(req); 2621 } 2622 } 2623 2624 static void 2625 nvmf_ctrlr_abort_on_pg(struct spdk_io_channel_iter *i) 2626 { 2627 struct spdk_nvmf_request *req = spdk_io_channel_iter_get_ctx(i); 2628 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 2629 struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch); 2630 uint16_t sqid = req->cmd->nvme_cmd.cdw10_bits.abort.sqid; 2631 struct spdk_nvmf_qpair *qpair; 2632 2633 TAILQ_FOREACH(qpair, &group->qpairs, link) { 2634 if (qpair->ctrlr == req->qpair->ctrlr && qpair->qid == sqid) { 2635 /* Found the qpair */ 2636 2637 nvmf_qpair_abort_request(qpair, req); 2638 2639 /* Return -1 for the status so the iteration across threads stops. */ 2640 spdk_for_each_channel_continue(i, -1); 2641 return; 2642 } 2643 } 2644 2645 spdk_for_each_channel_continue(i, 0); 2646 } 2647 2648 static int 2649 nvmf_ctrlr_abort(struct spdk_nvmf_request *req) 2650 { 2651 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 2652 2653 rsp->cdw0 = 1U; /* Command not aborted */ 2654 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 2655 rsp->status.sc = SPDK_NVME_SC_SUCCESS; 2656 2657 /* Send a message to each poll group, searching for this ctrlr, sqid, and command. */ 2658 spdk_for_each_channel(req->qpair->ctrlr->subsys->tgt, 2659 nvmf_ctrlr_abort_on_pg, 2660 req, 2661 nvmf_ctrlr_abort_done 2662 ); 2663 2664 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 2665 } 2666 2667 int 2668 nvmf_ctrlr_abort_request(struct spdk_nvmf_request *req) 2669 { 2670 struct spdk_nvmf_request *req_to_abort = req->req_to_abort; 2671 struct spdk_bdev *bdev; 2672 struct spdk_bdev_desc *desc; 2673 struct spdk_io_channel *ch; 2674 int rc; 2675 2676 assert(req_to_abort != NULL); 2677 2678 if (g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_ABORT].hdlr && 2679 nvmf_qpair_is_admin_queue(req_to_abort->qpair)) { 2680 return g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_ABORT].hdlr(req); 2681 } 2682 2683 rc = spdk_nvmf_request_get_bdev(req_to_abort->cmd->nvme_cmd.nsid, req_to_abort, 2684 &bdev, &desc, &ch); 2685 if (rc != 0) { 2686 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2687 } 2688 2689 return spdk_nvmf_bdev_ctrlr_abort_cmd(bdev, desc, ch, req, req_to_abort); 2690 } 2691 2692 static int 2693 get_features_generic(struct spdk_nvmf_request *req, uint32_t cdw0) 2694 { 2695 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 2696 2697 rsp->cdw0 = cdw0; 2698 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2699 } 2700 2701 /* we have to use the typedef in the function declaration to appease astyle. 
*/ 2702 typedef enum spdk_nvme_path_status_code spdk_nvme_path_status_code_t; 2703 2704 static spdk_nvme_path_status_code_t 2705 _nvme_ana_state_to_path_status(enum spdk_nvme_ana_state ana_state) 2706 { 2707 switch (ana_state) { 2708 case SPDK_NVME_ANA_INACCESSIBLE_STATE: 2709 return SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE; 2710 case SPDK_NVME_ANA_PERSISTENT_LOSS_STATE: 2711 return SPDK_NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS; 2712 case SPDK_NVME_ANA_CHANGE_STATE: 2713 return SPDK_NVME_SC_ASYMMETRIC_ACCESS_TRANSITION; 2714 default: 2715 return SPDK_NVME_SC_INTERNAL_PATH_ERROR; 2716 } 2717 } 2718 2719 static int 2720 nvmf_ctrlr_get_features(struct spdk_nvmf_request *req) 2721 { 2722 uint8_t feature; 2723 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2724 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2725 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 2726 enum spdk_nvme_ana_state ana_state; 2727 2728 feature = cmd->cdw10_bits.get_features.fid; 2729 2730 if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) { 2731 /* 2732 * Features supported by Discovery controller 2733 */ 2734 switch (feature) { 2735 case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER: 2736 return get_features_generic(req, ctrlr->feat.keep_alive_timer.raw); 2737 case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION: 2738 return get_features_generic(req, ctrlr->feat.async_event_configuration.raw); 2739 default: 2740 SPDK_DEBUGLOG(nvmf, "Get Features command with unsupported feature ID 0x%02x\n", feature); 2741 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2742 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2743 } 2744 } 2745 /* 2746 * Process Get Features command for non-discovery controller 2747 */ 2748 ana_state = ctrlr->listener->ana_state; 2749 switch (ana_state) { 2750 case SPDK_NVME_ANA_INACCESSIBLE_STATE: 2751 case SPDK_NVME_ANA_PERSISTENT_LOSS_STATE: 2752 case SPDK_NVME_ANA_CHANGE_STATE: 2753 switch (feature) { 2754 case SPDK_NVME_FEAT_ERROR_RECOVERY: 2755 case SPDK_NVME_FEAT_WRITE_ATOMICITY: 2756 case SPDK_NVME_FEAT_HOST_RESERVE_MASK: 2757 case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST: 2758 response->status.sct = SPDK_NVME_SCT_PATH; 2759 response->status.sc = _nvme_ana_state_to_path_status(ana_state); 2760 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2761 default: 2762 break; 2763 } 2764 break; 2765 default: 2766 break; 2767 } 2768 2769 switch (feature) { 2770 case SPDK_NVME_FEAT_ARBITRATION: 2771 return get_features_generic(req, ctrlr->feat.arbitration.raw); 2772 case SPDK_NVME_FEAT_POWER_MANAGEMENT: 2773 return get_features_generic(req, ctrlr->feat.power_management.raw); 2774 case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD: 2775 return nvmf_ctrlr_get_features_temperature_threshold(req); 2776 case SPDK_NVME_FEAT_ERROR_RECOVERY: 2777 return get_features_generic(req, ctrlr->feat.error_recovery.raw); 2778 case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE: 2779 return get_features_generic(req, ctrlr->feat.volatile_write_cache.raw); 2780 case SPDK_NVME_FEAT_NUMBER_OF_QUEUES: 2781 return get_features_generic(req, ctrlr->feat.number_of_queues.raw); 2782 case SPDK_NVME_FEAT_WRITE_ATOMICITY: 2783 return get_features_generic(req, ctrlr->feat.write_atomicity.raw); 2784 case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION: 2785 return get_features_generic(req, ctrlr->feat.async_event_configuration.raw); 2786 case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER: 2787 return get_features_generic(req, ctrlr->feat.keep_alive_timer.raw); 2788 case SPDK_NVME_FEAT_HOST_IDENTIFIER: 2789 return nvmf_ctrlr_get_features_host_identifier(req); 2790 case 
SPDK_NVME_FEAT_HOST_RESERVE_MASK: 2791 return nvmf_ctrlr_get_features_reservation_notification_mask(req); 2792 case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST: 2793 return nvmf_ctrlr_get_features_reservation_persistence(req); 2794 default: 2795 SPDK_ERRLOG("Get Features command with unsupported feature ID 0x%02x\n", feature); 2796 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2797 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2798 } 2799 } 2800 2801 static int 2802 nvmf_ctrlr_set_features(struct spdk_nvmf_request *req) 2803 { 2804 uint8_t feature, save; 2805 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2806 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2807 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 2808 enum spdk_nvme_ana_state ana_state; 2809 /* 2810 * Features are not saveable by the controller as indicated by 2811 * ONCS field of the Identify Controller data. 2812 * */ 2813 save = cmd->cdw10_bits.set_features.sv; 2814 if (save) { 2815 response->status.sc = SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE; 2816 response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 2817 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2818 } 2819 2820 feature = cmd->cdw10_bits.set_features.fid; 2821 2822 if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) { 2823 /* 2824 * Features supported by Discovery controller 2825 */ 2826 switch (feature) { 2827 case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER: 2828 return nvmf_ctrlr_set_features_keep_alive_timer(req); 2829 case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION: 2830 return nvmf_ctrlr_set_features_async_event_configuration(req); 2831 default: 2832 SPDK_ERRLOG("Set Features command with unsupported feature ID 0x%02x\n", feature); 2833 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2834 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2835 } 2836 } 2837 /* 2838 * Process Set Features command for non-discovery controller 2839 */ 2840 ana_state = ctrlr->listener->ana_state; 2841 switch (ana_state) { 2842 case SPDK_NVME_ANA_INACCESSIBLE_STATE: 2843 case SPDK_NVME_ANA_CHANGE_STATE: 2844 if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) { 2845 response->status.sct = SPDK_NVME_SCT_PATH; 2846 response->status.sc = _nvme_ana_state_to_path_status(ana_state); 2847 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2848 } else { 2849 switch (feature) { 2850 case SPDK_NVME_FEAT_ERROR_RECOVERY: 2851 case SPDK_NVME_FEAT_WRITE_ATOMICITY: 2852 case SPDK_NVME_FEAT_HOST_RESERVE_MASK: 2853 case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST: 2854 response->status.sct = SPDK_NVME_SCT_PATH; 2855 response->status.sc = _nvme_ana_state_to_path_status(ana_state); 2856 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2857 default: 2858 break; 2859 } 2860 } 2861 break; 2862 case SPDK_NVME_ANA_PERSISTENT_LOSS_STATE: 2863 response->status.sct = SPDK_NVME_SCT_PATH; 2864 response->status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS; 2865 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2866 default: 2867 break; 2868 } 2869 2870 switch (feature) { 2871 case SPDK_NVME_FEAT_ARBITRATION: 2872 return nvmf_ctrlr_set_features_arbitration(req); 2873 case SPDK_NVME_FEAT_POWER_MANAGEMENT: 2874 return nvmf_ctrlr_set_features_power_management(req); 2875 case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD: 2876 return nvmf_ctrlr_set_features_temperature_threshold(req); 2877 case SPDK_NVME_FEAT_ERROR_RECOVERY: 2878 return nvmf_ctrlr_set_features_error_recovery(req); 2879 case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE: 2880 return nvmf_ctrlr_set_features_volatile_write_cache(req); 2881 case SPDK_NVME_FEAT_NUMBER_OF_QUEUES: 2882 return 
nvmf_ctrlr_set_features_number_of_queues(req); 2883 case SPDK_NVME_FEAT_WRITE_ATOMICITY: 2884 return nvmf_ctrlr_set_features_write_atomicity(req); 2885 case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION: 2886 return nvmf_ctrlr_set_features_async_event_configuration(req); 2887 case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER: 2888 return nvmf_ctrlr_set_features_keep_alive_timer(req); 2889 case SPDK_NVME_FEAT_HOST_IDENTIFIER: 2890 return nvmf_ctrlr_set_features_host_identifier(req); 2891 case SPDK_NVME_FEAT_HOST_RESERVE_MASK: 2892 return nvmf_ctrlr_set_features_reservation_notification_mask(req); 2893 case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST: 2894 return nvmf_ctrlr_set_features_reservation_persistence(req); 2895 case SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT: 2896 return nvmf_ctrlr_set_features_host_behavior_support(req); 2897 default: 2898 SPDK_ERRLOG("Set Features command with unsupported feature ID 0x%02x\n", feature); 2899 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2900 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2901 } 2902 } 2903 2904 static int 2905 nvmf_ctrlr_keep_alive(struct spdk_nvmf_request *req) 2906 { 2907 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2908 2909 SPDK_DEBUGLOG(nvmf, "Keep Alive\n"); 2910 /* 2911 * To handle keep alive just clear or reset the 2912 * ctrlr based keep alive duration counter. 2913 * When added, a separate timer based process 2914 * will monitor if the time since last recorded 2915 * keep alive has exceeded the max duration and 2916 * take appropriate action. 2917 */ 2918 ctrlr->last_keep_alive_tick = spdk_get_ticks(); 2919 2920 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2921 } 2922 2923 int 2924 nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req) 2925 { 2926 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2927 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2928 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 2929 int rc; 2930 2931 if (ctrlr == NULL) { 2932 SPDK_ERRLOG("Admin command sent before CONNECT\n"); 2933 response->status.sct = SPDK_NVME_SCT_GENERIC; 2934 response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR; 2935 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2936 } 2937 2938 if (ctrlr->vcprop.cc.bits.en != 1) { 2939 SPDK_ERRLOG("Admin command sent to disabled controller\n"); 2940 response->status.sct = SPDK_NVME_SCT_GENERIC; 2941 response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR; 2942 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2943 } 2944 2945 if (req->data && spdk_nvme_opc_get_data_transfer(cmd->opc) == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { 2946 memset(req->data, 0, req->length); 2947 } 2948 2949 if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) { 2950 /* Discovery controllers only support these admin OPS. */ 2951 switch (cmd->opc) { 2952 case SPDK_NVME_OPC_IDENTIFY: 2953 case SPDK_NVME_OPC_GET_LOG_PAGE: 2954 case SPDK_NVME_OPC_KEEP_ALIVE: 2955 case SPDK_NVME_OPC_SET_FEATURES: 2956 case SPDK_NVME_OPC_GET_FEATURES: 2957 case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST: 2958 break; 2959 default: 2960 goto invalid_opcode; 2961 } 2962 } 2963 2964 /* Call a custom adm cmd handler if set. 
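	 * (For illustration only: a module can install such a handler at start-up,
	 * e.g. spdk_nvmf_set_custom_admin_cmd_hdlr(SPDK_NVME_OPC_FORMAT_NVM, my_format_hdlr),
	 * where my_format_hdlr is a hypothetical spdk_nvmf_custom_cmd_hdlr. A handler
	 * that returns one of the SPDK_NVMF_REQUEST_EXEC_STATUS values owns the command;
	 * a negative return falls through to the switch below.)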
	 * Aborts are handled in a different path (see nvmf_passthru_admin_cmd) */
	if (g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].hdlr && cmd->opc != SPDK_NVME_OPC_ABORT) {
		rc = g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].hdlr(req);
		if (rc >= SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
			/* The handler took care of this command */
			return rc;
		}
	}

	switch (cmd->opc) {
	case SPDK_NVME_OPC_GET_LOG_PAGE:
		return nvmf_ctrlr_get_log_page(req);
	case SPDK_NVME_OPC_IDENTIFY:
		return nvmf_ctrlr_identify(req);
	case SPDK_NVME_OPC_ABORT:
		return nvmf_ctrlr_abort(req);
	case SPDK_NVME_OPC_GET_FEATURES:
		return nvmf_ctrlr_get_features(req);
	case SPDK_NVME_OPC_SET_FEATURES:
		return nvmf_ctrlr_set_features(req);
	case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST:
		return nvmf_ctrlr_async_event_request(req);
	case SPDK_NVME_OPC_KEEP_ALIVE:
		return nvmf_ctrlr_keep_alive(req);

	case SPDK_NVME_OPC_CREATE_IO_SQ:
	case SPDK_NVME_OPC_CREATE_IO_CQ:
	case SPDK_NVME_OPC_DELETE_IO_SQ:
	case SPDK_NVME_OPC_DELETE_IO_CQ:
		/* Create and Delete I/O CQ/SQ not allowed in NVMe-oF */
		goto invalid_opcode;

	default:
		goto invalid_opcode;
	}

invalid_opcode:
	SPDK_ERRLOG("Unsupported admin opcode 0x%x\n", cmd->opc);
	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_process_fabrics_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_capsule_cmd *cap_hdr;

	cap_hdr = &req->cmd->nvmf_cmd;

	if (qpair->ctrlr == NULL) {
		/* No ctrlr established yet; the only valid command is Connect */
		if (cap_hdr->fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT) {
			return nvmf_ctrlr_cmd_connect(req);
		} else {
			SPDK_DEBUGLOG(nvmf, "Got fctype 0x%x, expected Connect\n",
				      cap_hdr->fctype);
			req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
	} else if (nvmf_qpair_is_admin_queue(qpair)) {
		/*
		 * Controller session is established, and this is an admin queue.
		 * Disallow Connect and allow other fabrics commands.
3030 */ 3031 switch (cap_hdr->fctype) { 3032 case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET: 3033 return nvmf_property_set(req); 3034 case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET: 3035 return nvmf_property_get(req); 3036 default: 3037 SPDK_DEBUGLOG(nvmf, "unknown fctype 0x%02x\n", 3038 cap_hdr->fctype); 3039 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3040 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE; 3041 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3042 } 3043 } else { 3044 /* Controller session is established, and this is an I/O queue */ 3045 /* For now, no I/O-specific Fabrics commands are implemented (other than Connect) */ 3046 SPDK_DEBUGLOG(nvmf, "Unexpected I/O fctype 0x%x\n", cap_hdr->fctype); 3047 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3048 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE; 3049 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3050 } 3051 } 3052 3053 static inline int 3054 nvmf_ctrlr_async_event_notification(struct spdk_nvmf_ctrlr *ctrlr, 3055 union spdk_nvme_async_event_completion *event) 3056 { 3057 struct spdk_nvmf_request *req; 3058 struct spdk_nvme_cpl *rsp; 3059 3060 assert(ctrlr->nr_aer_reqs > 0); 3061 3062 req = ctrlr->aer_req[--ctrlr->nr_aer_reqs]; 3063 rsp = &req->rsp->nvme_cpl; 3064 3065 rsp->cdw0 = event->raw; 3066 3067 _nvmf_request_complete(req); 3068 ctrlr->aer_req[ctrlr->nr_aer_reqs] = NULL; 3069 3070 return 0; 3071 } 3072 3073 static inline void 3074 nvmf_ctrlr_queue_pending_async_event(struct spdk_nvmf_ctrlr *ctrlr, 3075 union spdk_nvme_async_event_completion *event) 3076 { 3077 struct spdk_nvmf_async_event_completion *nvmf_event; 3078 3079 nvmf_event = calloc(1, sizeof(*nvmf_event)); 3080 if (!nvmf_event) { 3081 SPDK_ERRLOG("Alloc nvmf event failed, ignore the event\n"); 3082 return; 3083 } 3084 nvmf_event->event.raw = event->raw; 3085 STAILQ_INSERT_TAIL(&ctrlr->async_events, nvmf_event, link); 3086 } 3087 3088 int 3089 nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr) 3090 { 3091 union spdk_nvme_async_event_completion event = {0}; 3092 3093 /* Users may disable the event notification */ 3094 if (!ctrlr->feat.async_event_configuration.bits.ns_attr_notice) { 3095 return 0; 3096 } 3097 3098 if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGE_MASK_BIT)) { 3099 return 0; 3100 } 3101 3102 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 3103 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 3104 event.bits.log_page_identifier = SPDK_NVME_LOG_CHANGED_NS_LIST; 3105 3106 /* If there is no outstanding AER request, queue the event. Then 3107 * if an AER is later submitted, this event can be sent as a 3108 * response. 
3109 */ 3110 if (ctrlr->nr_aer_reqs == 0) { 3111 nvmf_ctrlr_queue_pending_async_event(ctrlr, &event); 3112 return 0; 3113 } 3114 3115 return nvmf_ctrlr_async_event_notification(ctrlr, &event); 3116 } 3117 3118 int 3119 nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr) 3120 { 3121 union spdk_nvme_async_event_completion event = {0}; 3122 3123 /* Users may disable the event notification */ 3124 if (!ctrlr->feat.async_event_configuration.bits.ana_change_notice) { 3125 return 0; 3126 } 3127 3128 if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ANA_CHANGE_MASK_BIT)) { 3129 return 0; 3130 } 3131 3132 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 3133 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 3134 event.bits.log_page_identifier = SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS; 3135 3136 /* If there is no outstanding AER request, queue the event. Then 3137 * if an AER is later submitted, this event can be sent as a 3138 * response. 3139 */ 3140 if (ctrlr->nr_aer_reqs == 0) { 3141 nvmf_ctrlr_queue_pending_async_event(ctrlr, &event); 3142 return 0; 3143 } 3144 3145 return nvmf_ctrlr_async_event_notification(ctrlr, &event); 3146 } 3147 3148 void 3149 nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr) 3150 { 3151 union spdk_nvme_async_event_completion event = {0}; 3152 3153 if (!ctrlr->num_avail_log_pages) { 3154 return; 3155 } 3156 3157 if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL_MASK_BIT)) { 3158 return; 3159 } 3160 3161 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_IO; 3162 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL; 3163 event.bits.log_page_identifier = SPDK_NVME_LOG_RESERVATION_NOTIFICATION; 3164 3165 /* If there is no outstanding AER request, queue the event. Then 3166 * if an AER is later submitted, this event can be sent as a 3167 * response. 3168 */ 3169 if (ctrlr->nr_aer_reqs == 0) { 3170 nvmf_ctrlr_queue_pending_async_event(ctrlr, &event); 3171 return; 3172 } 3173 3174 nvmf_ctrlr_async_event_notification(ctrlr, &event); 3175 } 3176 3177 int 3178 nvmf_ctrlr_async_event_discovery_log_change_notice(struct spdk_nvmf_ctrlr *ctrlr) 3179 { 3180 union spdk_nvme_async_event_completion event = {0}; 3181 3182 /* Users may disable the event notification manually or 3183 * it may not be enabled due to keep alive timeout 3184 * not being set in connect command to discovery controller. 3185 */ 3186 if (!ctrlr->feat.async_event_configuration.bits.discovery_log_change_notice) { 3187 return 0; 3188 } 3189 3190 if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE_MASK_BIT)) { 3191 return 0; 3192 } 3193 3194 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 3195 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE; 3196 event.bits.log_page_identifier = SPDK_NVME_LOG_DISCOVERY; 3197 3198 /* If there is no outstanding AER request, queue the event. Then 3199 * if an AER is later submitted, this event can be sent as a 3200 * response. 
3201 */ 3202 if (ctrlr->nr_aer_reqs == 0) { 3203 nvmf_ctrlr_queue_pending_async_event(ctrlr, &event); 3204 return 0; 3205 } 3206 3207 return nvmf_ctrlr_async_event_notification(ctrlr, &event); 3208 } 3209 3210 void 3211 nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair) 3212 { 3213 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; 3214 int i; 3215 3216 if (!nvmf_qpair_is_admin_queue(qpair)) { 3217 return; 3218 } 3219 3220 for (i = 0; i < ctrlr->nr_aer_reqs; i++) { 3221 spdk_nvmf_request_free(ctrlr->aer_req[i]); 3222 ctrlr->aer_req[i] = NULL; 3223 } 3224 3225 ctrlr->nr_aer_reqs = 0; 3226 } 3227 3228 void 3229 nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr) 3230 { 3231 struct spdk_nvmf_request *req; 3232 int i; 3233 3234 if (!ctrlr->nr_aer_reqs) { 3235 return; 3236 } 3237 3238 for (i = 0; i < ctrlr->nr_aer_reqs; i++) { 3239 req = ctrlr->aer_req[i]; 3240 3241 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3242 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 3243 _nvmf_request_complete(req); 3244 3245 ctrlr->aer_req[i] = NULL; 3246 } 3247 3248 ctrlr->nr_aer_reqs = 0; 3249 } 3250 3251 static void 3252 _nvmf_ctrlr_add_reservation_log(void *ctx) 3253 { 3254 struct spdk_nvmf_reservation_log *log = (struct spdk_nvmf_reservation_log *)ctx; 3255 struct spdk_nvmf_ctrlr *ctrlr = log->ctrlr; 3256 3257 ctrlr->log_page_count++; 3258 3259 /* Maximum number of queued log pages is 255 */ 3260 if (ctrlr->num_avail_log_pages == 0xff) { 3261 struct spdk_nvmf_reservation_log *entry; 3262 entry = TAILQ_LAST(&ctrlr->log_head, log_page_head); 3263 entry->log.log_page_count = ctrlr->log_page_count; 3264 free(log); 3265 return; 3266 } 3267 3268 log->log.log_page_count = ctrlr->log_page_count; 3269 log->log.num_avail_log_pages = ctrlr->num_avail_log_pages++; 3270 TAILQ_INSERT_TAIL(&ctrlr->log_head, log, link); 3271 3272 nvmf_ctrlr_async_event_reservation_notification(ctrlr); 3273 } 3274 3275 void 3276 nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr, 3277 struct spdk_nvmf_ns *ns, 3278 enum spdk_nvme_reservation_notification_log_page_type type) 3279 { 3280 struct spdk_nvmf_reservation_log *log; 3281 3282 switch (type) { 3283 case SPDK_NVME_RESERVATION_LOG_PAGE_EMPTY: 3284 return; 3285 case SPDK_NVME_REGISTRATION_PREEMPTED: 3286 if (ns->mask & SPDK_NVME_REGISTRATION_PREEMPTED_MASK) { 3287 return; 3288 } 3289 break; 3290 case SPDK_NVME_RESERVATION_RELEASED: 3291 if (ns->mask & SPDK_NVME_RESERVATION_RELEASED_MASK) { 3292 return; 3293 } 3294 break; 3295 case SPDK_NVME_RESERVATION_PREEMPTED: 3296 if (ns->mask & SPDK_NVME_RESERVATION_PREEMPTED_MASK) { 3297 return; 3298 } 3299 break; 3300 default: 3301 return; 3302 } 3303 3304 log = calloc(1, sizeof(*log)); 3305 if (!log) { 3306 SPDK_ERRLOG("Alloc log page failed, ignore the log\n"); 3307 return; 3308 } 3309 log->ctrlr = ctrlr; 3310 log->log.type = type; 3311 log->log.nsid = ns->nsid; 3312 3313 spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_add_reservation_log, log); 3314 } 3315 3316 /* Check from subsystem poll group's namespace information data structure */ 3317 static bool 3318 nvmf_ns_info_ctrlr_is_registrant(struct spdk_nvmf_subsystem_pg_ns_info *ns_info, 3319 struct spdk_nvmf_ctrlr *ctrlr) 3320 { 3321 uint32_t i; 3322 3323 for (i = 0; i < SPDK_NVMF_MAX_NUM_REGISTRANTS; i++) { 3324 if (!spdk_uuid_compare(&ns_info->reg_hostid[i], &ctrlr->hostid)) { 3325 return true; 3326 } 3327 } 3328 3329 return false; 3330 } 3331 3332 /* 3333 * Check the NVMe command is permitted or not for current controller(Host). 
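 * For example: while another host holds a Write Exclusive reservation, Read and
 * Compare from a non-registrant still pass this check, but Write, Flush and
 * Dataset Management are failed with Reservation Conflict.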
/* Check from subsystem poll group's namespace information data structure */
static bool
nvmf_ns_info_ctrlr_is_registrant(struct spdk_nvmf_subsystem_pg_ns_info *ns_info,
				 struct spdk_nvmf_ctrlr *ctrlr)
{
	uint32_t i;

	for (i = 0; i < SPDK_NVMF_MAX_NUM_REGISTRANTS; i++) {
		if (!spdk_uuid_compare(&ns_info->reg_hostid[i], &ctrlr->hostid)) {
			return true;
		}
	}

	return false;
}

/*
 * Check whether the NVMe command is permitted for the current controller (host).
 */
static int
nvmf_ns_reservation_request_check(struct spdk_nvmf_subsystem_pg_ns_info *ns_info,
				  struct spdk_nvmf_ctrlr *ctrlr,
				  struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	enum spdk_nvme_reservation_type rtype = ns_info->rtype;
	uint8_t status = SPDK_NVME_SC_SUCCESS;
	uint8_t racqa;
	bool is_registrant;

	/* No valid reservation */
	if (!rtype) {
		return 0;
	}

	is_registrant = nvmf_ns_info_ctrlr_is_registrant(ns_info, ctrlr);
	/* All registrants type and current ctrlr is a valid registrant */
	if ((rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS ||
	     rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) && is_registrant) {
		return 0;
	} else if (!spdk_uuid_compare(&ns_info->holder_id, &ctrlr->hostid)) {
		return 0;
	}

	/* Non-holder for current controller */
	switch (cmd->opc) {
	case SPDK_NVME_OPC_READ:
	case SPDK_NVME_OPC_COMPARE:
		if (rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
			goto exit;
		}
		if ((rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY ||
		     rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) && !is_registrant) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		}
		break;
	case SPDK_NVME_OPC_FLUSH:
	case SPDK_NVME_OPC_WRITE:
	case SPDK_NVME_OPC_WRITE_UNCORRECTABLE:
	case SPDK_NVME_OPC_WRITE_ZEROES:
	case SPDK_NVME_OPC_DATASET_MANAGEMENT:
		if (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE ||
		    rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
			goto exit;
		}
		if (!is_registrant) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		}
		break;
	case SPDK_NVME_OPC_RESERVATION_ACQUIRE:
		racqa = cmd->cdw10_bits.resv_acquire.racqa;
		if (racqa == SPDK_NVME_RESERVE_ACQUIRE) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
			goto exit;
		}
		if (!is_registrant) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		}
		break;
	case SPDK_NVME_OPC_RESERVATION_RELEASE:
		if (!is_registrant) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		}
		break;
	default:
		break;
	}

exit:
	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->rsp->nvme_cpl.status.sc = status;
	if (status == SPDK_NVME_SC_RESERVATION_CONFLICT) {
		return -EPERM;
	}

	return 0;
}
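
/*
 * Illustrative only: a short worked example of the check above.  With a Write
 * Exclusive reservation (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE) held by
 * another host, a WRITE from a controller that is not a registrant takes the
 * SPDK_NVME_OPC_WRITE branch, sets SPDK_NVME_SC_RESERVATION_CONFLICT in the
 * completion and returns -EPERM, while a READ in the same situation is still
 * allowed (only the Exclusive Access types block reads).  The caller pattern
 * mirrors the I/O path further below:
 *
 *	if (nvmf_ns_reservation_request_check(ns_info, ctrlr, req)) {
 *		// completion already carries SPDK_NVME_SC_RESERVATION_CONFLICT
 *		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
 *	}
 */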
operations\n"); 3438 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3439 rsp->status.sc = SPDK_NVME_SC_INVALID_OPCODE; 3440 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3441 } 3442 3443 req->qpair->first_fused_req = req; 3444 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3445 } else if (cmd->fuse == SPDK_NVME_CMD_FUSE_SECOND) { 3446 /* second fused operation (should be write) */ 3447 if (first_fused_req == NULL) { 3448 SPDK_ERRLOG("Wrong sequence of fused operations\n"); 3449 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3450 rsp->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED; 3451 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3452 } else if (cmd->opc != SPDK_NVME_OPC_WRITE) { 3453 struct spdk_nvme_cpl *fused_response = &first_fused_req->rsp->nvme_cpl; 3454 3455 SPDK_ERRLOG("Wrong op code of fused operations\n"); 3456 3457 /* abort req->qpair->first_fused_request and fail current command */ 3458 fused_response->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED; 3459 fused_response->status.sct = SPDK_NVME_SCT_GENERIC; 3460 _nvmf_request_complete(first_fused_req); 3461 3462 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3463 rsp->status.sc = SPDK_NVME_SC_INVALID_OPCODE; 3464 req->qpair->first_fused_req = NULL; 3465 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3466 } 3467 3468 /* save request of first command to generate response later */ 3469 req->first_fused_req = first_fused_req; 3470 req->qpair->first_fused_req = NULL; 3471 } else { 3472 SPDK_ERRLOG("Invalid fused command fuse field.\n"); 3473 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3474 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 3475 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3476 } 3477 3478 rc = nvmf_bdev_ctrlr_compare_and_write_cmd(bdev, desc, ch, req->first_fused_req, req); 3479 3480 if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) { 3481 if (spdk_nvme_cpl_is_error(rsp)) { 3482 struct spdk_nvme_cpl *fused_response = &first_fused_req->rsp->nvme_cpl; 3483 3484 fused_response->status = rsp->status; 3485 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3486 rsp->status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED; 3487 /* Complete first of fused commands. 
int
nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
{
	uint32_t nsid;
	struct spdk_nvmf_ns *ns;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_poll_group *group = req->qpair->group;
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
	enum spdk_nvme_ana_state ana_state;

	/* pre-set response details for this command */
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	nsid = cmd->nsid;

	if (spdk_unlikely(ctrlr == NULL)) {
		SPDK_ERRLOG("I/O command sent before CONNECT\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(ctrlr->vcprop.cc.bits.en != 1)) {
		SPDK_ERRLOG("I/O command sent to disabled controller\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* It is lower overhead to check only whether the ANA state is optimized
	 * or non-optimized.
	 */
	ana_state = ctrlr->listener->ana_state;
	if (spdk_unlikely(ana_state != SPDK_NVME_ANA_OPTIMIZED_STATE &&
			  ana_state != SPDK_NVME_ANA_NON_OPTIMIZED_STATE)) {
		SPDK_DEBUGLOG(nvmf, "Fail I/O command due to ANA state %d\n",
			      ana_state);
		response->status.sct = SPDK_NVME_SCT_PATH;
		response->status.sc = _nvme_ana_state_to_path_status(ana_state);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid);
	if (ns == NULL || ns->bdev == NULL) {
		SPDK_DEBUGLOG(nvmf, "Unsuccessful query for nsid %u\n", cmd->nsid);
		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		response->status.dnr = 1;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* scan-build falsely reports a dereference of a null pointer here */
	assert(group != NULL && group->sgroups != NULL);
	ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[nsid - 1];
	if (nvmf_ns_reservation_request_check(ns_info, ctrlr, req)) {
		SPDK_DEBUGLOG(nvmf, "Reservation Conflict for nsid %u, opcode %u\n",
			      cmd->nsid, cmd->opc);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	bdev = ns->bdev;
	desc = ns->desc;
	ch = ns_info->channel;

	if (spdk_unlikely(cmd->fuse & SPDK_NVME_CMD_FUSE_MASK)) {
		return nvmf_ctrlr_process_io_fused_cmd(req, bdev, desc, ch);
	} else if (spdk_unlikely(req->qpair->first_fused_req != NULL)) {
		struct spdk_nvme_cpl *fused_response = &req->qpair->first_fused_req->rsp->nvme_cpl;

		SPDK_ERRLOG("Expected second of fused commands - failing first of fused commands\n");

		/* abort req->qpair->first_fused_request and continue with new command */
		fused_response->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
		fused_response->status.sct = SPDK_NVME_SCT_GENERIC;
		_nvmf_request_complete(req->qpair->first_fused_req);
		req->qpair->first_fused_req = NULL;
	}

	switch (cmd->opc) {
	case SPDK_NVME_OPC_READ:
		return nvmf_bdev_ctrlr_read_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_WRITE:
		return nvmf_bdev_ctrlr_write_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_COMPARE:
		return nvmf_bdev_ctrlr_compare_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_WRITE_ZEROES:
		return nvmf_bdev_ctrlr_write_zeroes_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_FLUSH:
		return nvmf_bdev_ctrlr_flush_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_DATASET_MANAGEMENT:
		return nvmf_bdev_ctrlr_dsm_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_RESERVATION_REGISTER:
	case SPDK_NVME_OPC_RESERVATION_ACQUIRE:
	case SPDK_NVME_OPC_RESERVATION_RELEASE:
	case SPDK_NVME_OPC_RESERVATION_REPORT:
		spdk_thread_send_msg(ctrlr->subsys->thread, nvmf_ns_reservation_request, req);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	default:
		return nvmf_bdev_ctrlr_nvme_passthru_io(bdev, desc, ch, req);
	}
}

static void
nvmf_qpair_request_cleanup(struct spdk_nvmf_qpair *qpair)
{
	if (qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING) {
		assert(qpair->state_cb != NULL);

		if (TAILQ_EMPTY(&qpair->outstanding)) {
			qpair->state_cb(qpair->state_cb_arg, 0);
		}
	}
}

int
spdk_nvmf_request_free(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;

	TAILQ_REMOVE(&qpair->outstanding, req, link);
	if (nvmf_transport_req_free(req)) {
		SPDK_ERRLOG("Unable to free transport level request resources.\n");
	}

	nvmf_qpair_request_cleanup(qpair);

	return 0;
}

static void
_nvmf_request_complete(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_qpair *qpair;
	struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;
	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
	bool is_aer = false;
	uint32_t nsid;
	bool paused;
	uint8_t opcode;

	rsp->sqid = 0;
	rsp->status.p = 0;
	rsp->cid = req->cmd->nvme_cmd.cid;
	nsid = req->cmd->nvme_cmd.nsid;
	opcode = req->cmd->nvmf_cmd.opcode;

	qpair = req->qpair;
	if (qpair->ctrlr) {
		sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
		assert(sgroup != NULL);
		is_aer = req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;

		/*
		 * Set the crd value.
		 * If the IO has any error, dnr (DoNotRetry) is not 1,
		 * and ACRE is enabled, set crd to 1 to select the first CRDT.
		 */
		if (spdk_nvme_cpl_is_error(rsp) &&
		    rsp->status.dnr == 0 &&
		    qpair->ctrlr->acre_enabled) {
			rsp->status.crd = 1;
		}
	} else if (spdk_unlikely(nvmf_request_is_fabric_connect(req))) {
		sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
	}

	if (SPDK_DEBUGLOG_FLAG_ENABLED("nvmf")) {
		spdk_nvme_print_completion(qpair->qid, rsp);
	}

	TAILQ_REMOVE(&qpair->outstanding, req, link);
	if (nvmf_transport_req_complete(req)) {
		SPDK_ERRLOG("Transport request completion error!\n");
	}

	/* AER cmd is an exception */
	if (sgroup && !is_aer) {
		if (spdk_unlikely(opcode == SPDK_NVME_OPC_FABRIC ||
				  nvmf_qpair_is_admin_queue(qpair))) {
			assert(sgroup->mgmt_io_outstanding > 0);
			sgroup->mgmt_io_outstanding--;
		} else {
			/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
			if (spdk_likely(nsid - 1 < sgroup->num_ns)) {
				sgroup->ns_info[nsid - 1].io_outstanding--;
			}
		}

		if (spdk_unlikely(sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSING &&
				  sgroup->mgmt_io_outstanding == 0)) {
			paused = true;
			for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
				ns_info = &sgroup->ns_info[nsid];

				if (ns_info->state == SPDK_NVMF_SUBSYSTEM_PAUSING &&
				    ns_info->io_outstanding > 0) {
					paused = false;
					break;
				}
			}

			if (paused) {
				sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
				sgroup->cb_fn(sgroup->cb_arg, 0);
				sgroup->cb_fn = NULL;
				sgroup->cb_arg = NULL;
			}
		}
	}

	nvmf_qpair_request_cleanup(qpair);
}

int
spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;

	if (spdk_likely(qpair->group->thread == spdk_get_thread())) {
		_nvmf_request_complete(req);
	} else {
		spdk_thread_send_msg(qpair->group->thread,
				     _nvmf_request_complete, req);
	}

	return 0;
}

void
spdk_nvmf_request_exec_fabrics(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;
	enum spdk_nvmf_request_exec_status status;

	if (qpair->ctrlr) {
		sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
	} else if (spdk_unlikely(nvmf_request_is_fabric_connect(req))) {
		sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
	}

	assert(sgroup != NULL);
	sgroup->mgmt_io_outstanding++;

	/* Place the request on the outstanding list so we can keep track of it */
	TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);

	assert(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC);
	status = nvmf_ctrlr_process_fabrics_cmd(req);

	if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		_nvmf_request_complete(req);
	}
}
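
/*
 * Illustrative only: a hedged sketch of how a transport or command handler
 * that finishes a request asynchronously would complete it.  The completion
 * status is filled into the response capsule first; spdk_nvmf_request_complete()
 * then either completes inline or forwards the work to the poll group's
 * thread, as implemented above.
 *
 *	struct spdk_nvme_cpl *cpl = &req->rsp->nvme_cpl;
 *
 *	cpl->status.sct = SPDK_NVME_SCT_GENERIC;
 *	cpl->status.sc = SPDK_NVME_SC_SUCCESS;
 *	spdk_nvmf_request_complete(req);
 */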
void
spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;
	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
	enum spdk_nvmf_request_exec_status status;
	uint32_t nsid;

	if (qpair->ctrlr) {
		sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
		assert(sgroup != NULL);
	} else if (spdk_unlikely(nvmf_request_is_fabric_connect(req))) {
		sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
	}

	/* Check if the subsystem is paused (if there is a subsystem) */
	if (sgroup != NULL) {
		if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC ||
				  nvmf_qpair_is_admin_queue(qpair))) {
			if (sgroup->state != SPDK_NVMF_SUBSYSTEM_ACTIVE) {
				/* The subsystem is not currently active. Queue this request. */
				TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
				return;
			}
			sgroup->mgmt_io_outstanding++;
		} else {
			nsid = req->cmd->nvme_cmd.nsid;

			/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
			if (spdk_unlikely(nsid - 1 >= sgroup->num_ns)) {
				req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
				req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
				req->rsp->nvme_cpl.status.dnr = 1;
				TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
				_nvmf_request_complete(req);
				return;
			}

			ns_info = &sgroup->ns_info[nsid - 1];
			if (ns_info->channel == NULL) {
				/* This can happen if the host sends I/O to a namespace that is
				 * in the process of being added, before the addition is
				 * complete. Report an invalid namespace in that case.
				 */
				req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
				req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
				req->rsp->nvme_cpl.status.dnr = 1;
				TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
				ns_info->io_outstanding++;
				_nvmf_request_complete(req);
				return;
			}

			if (ns_info->state != SPDK_NVMF_SUBSYSTEM_ACTIVE) {
				/* The namespace is not currently active. Queue this request. */
				TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
				return;
			}
			ns_info->io_outstanding++;
		}
	}

	if (qpair->state != SPDK_NVMF_QPAIR_ACTIVE) {
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
		_nvmf_request_complete(req);
		return;
	}

	if (SPDK_DEBUGLOG_FLAG_ENABLED("nvmf")) {
		spdk_nvme_print_command(qpair->qid, &req->cmd->nvme_cmd);
	}

	/* Place the request on the outstanding list so we can keep track of it */
	TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);

	if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC)) {
		status = nvmf_ctrlr_process_fabrics_cmd(req);
	} else if (spdk_unlikely(nvmf_qpair_is_admin_queue(qpair))) {
		status = nvmf_ctrlr_process_admin_cmd(req);
	} else {
		status = nvmf_ctrlr_process_io_cmd(req);
	}

	if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		_nvmf_request_complete(req);
	}
}

static bool
nvmf_ctrlr_get_dif_ctx(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_cmd *cmd,
		       struct spdk_dif_ctx *dif_ctx)
{
	struct spdk_nvmf_ns *ns;
	struct spdk_bdev *bdev;

	if (ctrlr == NULL || cmd == NULL) {
		return false;
	}

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
	if (ns == NULL || ns->bdev == NULL) {
		return false;
	}

	bdev = ns->bdev;

	switch (cmd->opc) {
	case SPDK_NVME_OPC_READ:
	case SPDK_NVME_OPC_WRITE:
	case SPDK_NVME_OPC_COMPARE:
		return nvmf_bdev_ctrlr_get_dif_ctx(bdev, cmd, dif_ctx);
	default:
		break;
	}

	return false;
}

bool
spdk_nvmf_request_get_dif_ctx(struct spdk_nvmf_request *req, struct spdk_dif_ctx *dif_ctx)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	if (spdk_likely(ctrlr == NULL || !ctrlr->dif_insert_or_strip)) {
		return false;
	}

	if (spdk_unlikely(qpair->state != SPDK_NVMF_QPAIR_ACTIVE)) {
		return false;
	}

	if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC)) {
		return false;
	}

	if (spdk_unlikely(nvmf_qpair_is_admin_queue(qpair))) {
		return false;
	}

	return nvmf_ctrlr_get_dif_ctx(ctrlr, &req->cmd->nvme_cmd, dif_ctx);
}
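
/*
 * Illustrative only: a hedged sketch of how a transport might use
 * spdk_nvmf_request_get_dif_ctx() when DIF insert/strip is enabled.  The DIF
 * helpers are assumed to come from spdk/dif.h; `req` is the request being
 * processed and the surrounding buffer handling is hypothetical.
 *
 *	struct spdk_dif_ctx dif_ctx;
 *
 *	if (spdk_nvmf_request_get_dif_ctx(req, &dif_ctx)) {
 *		// Data buffers carry protection information; generate or verify
 *		// it (e.g. with spdk_dif_generate()/spdk_dif_verify()) before or
 *		// after the transfer, depending on the direction.
 *	}
 */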
void
spdk_nvmf_set_custom_admin_cmd_hdlr(uint8_t opc, spdk_nvmf_custom_cmd_hdlr hdlr)
{
	g_nvmf_custom_admin_cmd_hdlrs[opc].hdlr = hdlr;
}

static int
nvmf_passthru_admin_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_nvme_cmd *cmd = spdk_nvmf_request_get_cmd(req);
	struct spdk_nvme_cpl *response = spdk_nvmf_request_get_response(req);
	uint32_t bdev_nsid;
	int rc;

	if (g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].nsid == 0) {
		bdev_nsid = cmd->nsid;
	} else {
		bdev_nsid = g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].nsid;
	}

	rc = spdk_nvmf_request_get_bdev(bdev_nsid, req, &bdev, &desc, &ch);
	if (rc) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	return spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev, desc, ch, req, NULL);
}

void
spdk_nvmf_set_passthru_admin_cmd(uint8_t opc, uint32_t forward_nsid)
{
	g_nvmf_custom_admin_cmd_hdlrs[opc].hdlr = nvmf_passthru_admin_cmd;
	g_nvmf_custom_admin_cmd_hdlrs[opc].nsid = forward_nsid;
}

int
spdk_nvmf_request_get_bdev(uint32_t nsid, struct spdk_nvmf_request *req,
			   struct spdk_bdev **bdev, struct spdk_bdev_desc **desc, struct spdk_io_channel **ch)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_poll_group *group = req->qpair->group;
	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;

	*bdev = NULL;
	*desc = NULL;
	*ch = NULL;

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid);
	if (ns == NULL || ns->bdev == NULL) {
		return -EINVAL;
	}

	assert(group != NULL && group->sgroups != NULL);
	ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[nsid - 1];
	*bdev = ns->bdev;
	*desc = ns->desc;
	*ch = ns_info->channel;

	return 0;
}

struct spdk_nvmf_ctrlr *spdk_nvmf_request_get_ctrlr(struct spdk_nvmf_request *req)
{
	return req->qpair->ctrlr;
}

struct spdk_nvme_cmd *spdk_nvmf_request_get_cmd(struct spdk_nvmf_request *req)
{
	return &req->cmd->nvme_cmd;
}

struct spdk_nvme_cpl *spdk_nvmf_request_get_response(struct spdk_nvmf_request *req)
{
	return &req->rsp->nvme_cpl;
}

struct spdk_nvmf_subsystem *spdk_nvmf_request_get_subsystem(struct spdk_nvmf_request *req)
{
	return req->qpair->ctrlr->subsys;
}

void spdk_nvmf_request_get_data(struct spdk_nvmf_request *req, void **data, uint32_t *length)
{
	*data = req->data;
	*length = req->length;
}

struct spdk_nvmf_subsystem *spdk_nvmf_ctrlr_get_subsystem(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->subsys;
}

uint16_t spdk_nvmf_ctrlr_get_id(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->cntlid;
}

struct spdk_nvmf_request *spdk_nvmf_request_get_req_to_abort(struct spdk_nvmf_request *req)
{
	return req->req_to_abort;
}
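
/*
 * Illustrative only: a hedged sketch of the custom admin command hooks exposed
 * above.  A target application can either forward an admin opcode to the bdev
 * behind a namespace as an NVMe passthru command, or install its own handler.
 * `my_security_recv_hdlr` and the nsid value are hypothetical.
 *
 *	// Forward Security Receive admin commands to the bdev behind nsid 1.
 *	spdk_nvmf_set_passthru_admin_cmd(SPDK_NVME_OPC_SECURITY_RECEIVE, 1);
 *
 *	// Or install a fully custom handler for the same opcode.
 *	spdk_nvmf_set_custom_admin_cmd_hdlr(SPDK_NVME_OPC_SECURITY_RECEIVE,
 *					    my_security_recv_hdlr);
 *
 * A custom handler receives the request and can use spdk_nvmf_request_get_cmd(),
 * spdk_nvmf_request_get_response() and spdk_nvmf_request_get_bdev() to inspect
 * the command and reach the backing bdev, returning one of the
 * SPDK_NVMF_REQUEST_EXEC_STATUS_* values.
 */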