/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation. All rights reserved.
 * Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"
#include "transport.h"

#include "spdk/bdev.h"
#include "spdk/bdev_zone.h"
#include "spdk/bit_array.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/nvme_spec.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/version.h"
#include "spdk/log.h"
#include "spdk_internal/usdt.h"

#define MIN_KEEP_ALIVE_TIMEOUT_IN_MS 10000
#define NVMF_DISC_KATO_IN_MS 120000
#define KAS_TIME_UNIT_IN_MS 100
#define KAS_DEFAULT_VALUE (MIN_KEEP_ALIVE_TIMEOUT_IN_MS / KAS_TIME_UNIT_IN_MS)

#define NVMF_CC_RESET_SHN_TIMEOUT_IN_MS 10000

#define NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS (NVMF_CC_RESET_SHN_TIMEOUT_IN_MS + 5000)

#define DUPLICATE_QID_RETRY_US 1000

/*
 * Report the SPDK version as the firmware revision.
 * SPDK_VERSION_STRING won't fit into FR (only 8 bytes), so try to fit the most important parts.
 */
#define FW_VERSION SPDK_VERSION_MAJOR_STRING SPDK_VERSION_MINOR_STRING SPDK_VERSION_PATCH_STRING

#define ANA_TRANSITION_TIME_IN_SEC 10

#define NVMF_ABORT_COMMAND_LIMIT 3

/*
 * Support for custom admin command handlers
 */
struct spdk_nvmf_custom_admin_cmd {
	spdk_nvmf_custom_cmd_hdlr hdlr;
	uint32_t nsid; /* nsid to forward */
};

static struct spdk_nvmf_custom_admin_cmd g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_MAX_OPC + 1];

static void _nvmf_request_complete(void *ctx);
int nvmf_passthru_admin_cmd_for_ctrlr(struct spdk_nvmf_request *req, struct spdk_nvmf_ctrlr *ctrlr);
static int nvmf_passthru_admin_cmd(struct spdk_nvmf_request *req);

static inline void
nvmf_invalid_connect_response(struct spdk_nvmf_fabric_connect_rsp *rsp,
			      uint8_t iattr, uint16_t ipo)
{
	rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
	rsp->status_code_specific.invalid.iattr = iattr;
	rsp->status_code_specific.invalid.ipo = ipo;
}

#define SPDK_NVMF_INVALID_CONNECT_CMD(rsp, field) \
	nvmf_invalid_connect_response(rsp, 0, offsetof(struct spdk_nvmf_fabric_connect_cmd, field))
#define SPDK_NVMF_INVALID_CONNECT_DATA(rsp, field) \
	nvmf_invalid_connect_response(rsp, 1, offsetof(struct spdk_nvmf_fabric_connect_data, field))
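
/*
 * Example: SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid) reports IATTR = 0 (the
 * offending field is in the Connect command itself) with IPO set to the byte
 * offset of "qid" within struct spdk_nvmf_fabric_connect_cmd, while the
 * _DATA variant reports IATTR = 1 for fields carried in the Connect data.
 * This lets the host pinpoint exactly which field was rejected.
 */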
"ctrlr disconnect qpairs complete successfully\n"); 115 } else { 116 SPDK_ERRLOG("Fail to disconnect ctrlr qpairs\n"); 117 } 118 } 119 120 static int 121 _nvmf_ctrlr_disconnect_qpairs_on_pg(struct spdk_io_channel_iter *i, bool include_admin) 122 { 123 int rc = 0; 124 struct spdk_nvmf_ctrlr *ctrlr; 125 struct spdk_nvmf_qpair *qpair, *temp_qpair; 126 struct spdk_io_channel *ch; 127 struct spdk_nvmf_poll_group *group; 128 129 ctrlr = spdk_io_channel_iter_get_ctx(i); 130 ch = spdk_io_channel_iter_get_channel(i); 131 group = spdk_io_channel_get_ctx(ch); 132 133 TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, temp_qpair) { 134 if (qpair->ctrlr == ctrlr && (include_admin || !nvmf_qpair_is_admin_queue(qpair))) { 135 rc = spdk_nvmf_qpair_disconnect(qpair); 136 if (rc) { 137 if (rc == -EINPROGRESS) { 138 rc = 0; 139 } else { 140 SPDK_ERRLOG("Qpair disconnect failed\n"); 141 return rc; 142 } 143 } 144 } 145 } 146 147 return rc; 148 } 149 150 static void 151 nvmf_ctrlr_disconnect_qpairs_on_pg(struct spdk_io_channel_iter *i) 152 { 153 spdk_for_each_channel_continue(i, _nvmf_ctrlr_disconnect_qpairs_on_pg(i, true)); 154 } 155 156 static void 157 nvmf_ctrlr_disconnect_io_qpairs_on_pg(struct spdk_io_channel_iter *i) 158 { 159 spdk_for_each_channel_continue(i, _nvmf_ctrlr_disconnect_qpairs_on_pg(i, false)); 160 } 161 162 static int 163 nvmf_ctrlr_keep_alive_poll(void *ctx) 164 { 165 uint64_t keep_alive_timeout_tick; 166 uint64_t now = spdk_get_ticks(); 167 struct spdk_nvmf_ctrlr *ctrlr = ctx; 168 169 if (ctrlr->in_destruct) { 170 nvmf_ctrlr_stop_keep_alive_timer(ctrlr); 171 return SPDK_POLLER_IDLE; 172 } 173 174 SPDK_DEBUGLOG(nvmf, "Polling ctrlr keep alive timeout\n"); 175 176 /* If the Keep alive feature is in use and the timer expires */ 177 keep_alive_timeout_tick = ctrlr->last_keep_alive_tick + 178 ctrlr->feat.keep_alive_timer.bits.kato * spdk_get_ticks_hz() / UINT64_C(1000); 179 if (now > keep_alive_timeout_tick) { 180 SPDK_NOTICELOG("Disconnecting host %s from subsystem %s due to keep alive timeout.\n", 181 ctrlr->hostnqn, ctrlr->subsys->subnqn); 182 /* set the Controller Fatal Status bit to '1' */ 183 if (ctrlr->vcprop.csts.bits.cfs == 0) { 184 nvmf_ctrlr_set_fatal_status(ctrlr); 185 186 /* 187 * disconnect qpairs, terminate Transport connection 188 * destroy ctrlr, break the host to controller association 189 * disconnect qpairs with qpair->ctrlr == ctrlr 190 */ 191 spdk_for_each_channel(ctrlr->subsys->tgt, 192 nvmf_ctrlr_disconnect_qpairs_on_pg, 193 ctrlr, 194 nvmf_ctrlr_disconnect_qpairs_done); 195 return SPDK_POLLER_BUSY; 196 } 197 } 198 199 return SPDK_POLLER_IDLE; 200 } 201 202 static void 203 nvmf_ctrlr_start_keep_alive_timer(struct spdk_nvmf_ctrlr *ctrlr) 204 { 205 if (!ctrlr) { 206 SPDK_ERRLOG("Controller is NULL\n"); 207 return; 208 } 209 210 /* if cleared to 0 then the Keep Alive Timer is disabled */ 211 if (ctrlr->feat.keep_alive_timer.bits.kato != 0) { 212 213 ctrlr->last_keep_alive_tick = spdk_get_ticks(); 214 215 SPDK_DEBUGLOG(nvmf, "Ctrlr add keep alive poller\n"); 216 ctrlr->keep_alive_poller = SPDK_POLLER_REGISTER(nvmf_ctrlr_keep_alive_poll, ctrlr, 217 ctrlr->feat.keep_alive_timer.bits.kato * 1000); 218 } 219 } 220 221 static void 222 nvmf_qpair_set_ctrlr(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_ctrlr *ctrlr) 223 { 224 if (qpair->ctrlr != NULL) { 225 /* Admin queues will call this function twice. 

static void
nvmf_qpair_set_ctrlr(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_ctrlr *ctrlr)
{
	if (qpair->ctrlr != NULL) {
		/* Admin queues will call this function twice. */
		assert(qpair->ctrlr == ctrlr);
		return;
	}

	qpair->ctrlr = ctrlr;
	spdk_trace_owner_append_description(qpair->trace_id,
					    spdk_nvmf_subsystem_get_nqn(ctrlr->subsys));
}

static int _retry_qid_check(void *ctx);

static void
nvmf_ctrlr_send_connect_rsp(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	int rc;

	/* The qpair might have been disconnected in the meantime */
	assert(qpair->state == SPDK_NVMF_QPAIR_CONNECTING ||
	       qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING);
	if (qpair->state == SPDK_NVMF_QPAIR_CONNECTING) {
		if (nvmf_subsystem_host_auth_required(ctrlr->subsys, ctrlr->hostnqn)) {
			rc = nvmf_qpair_auth_init(qpair);
			if (rc != 0) {
				rsp->status.sct = SPDK_NVME_SCT_GENERIC;
				rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
				spdk_nvmf_request_complete(req);
				spdk_nvmf_qpair_disconnect(qpair);
				return;
			}
			rsp->status_code_specific.success.authreq.atr = 1;
			nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_AUTHENTICATING);
		} else {
			nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ENABLED);
		}
	}

	SPDK_DEBUGLOG(nvmf, "connect capsule response: cntlid = 0x%04x\n", ctrlr->cntlid);

	assert(spdk_get_thread() == qpair->group->thread);
	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
	rsp->status_code_specific.success.cntlid = ctrlr->cntlid;
	spdk_nvmf_request_complete(req);
}
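
/*
 * Adding a qpair can race with the teardown of an older qpair that used the
 * same QID: its bit may still be set in qpair_mask while it finishes
 * disconnecting on another thread. nvmf_ctrlr_add_qpair() below handles this
 * by parking the connect request and re-checking the mask every
 * DUPLICATE_QID_RETRY_US via _retry_qid_check().
 */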

static void
nvmf_ctrlr_add_qpair(struct spdk_nvmf_qpair *qpair,
		     struct spdk_nvmf_ctrlr *ctrlr,
		     struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;

	if (!ctrlr->admin_qpair) {
		SPDK_ERRLOG("Inactive admin qpair\n");
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		qpair->connect_req = NULL;
		qpair->ctrlr = NULL;
		spdk_nvmf_request_complete(req);
		return;
	}

	assert(ctrlr->admin_qpair->group->thread == spdk_get_thread());

	if (spdk_bit_array_get(ctrlr->qpair_mask, qpair->qid)) {
		if (qpair->connect_req != NULL) {
			SPDK_ERRLOG("Got I/O connect with duplicate QID %u (cntlid:%u)\n",
				    qpair->qid, ctrlr->cntlid);
			rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
			rsp->status.sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER;
			qpair->connect_req = NULL;
			qpair->ctrlr = NULL;
			spdk_nvmf_request_complete(req);
		} else {
			SPDK_WARNLOG("Duplicate QID detected (cntlid:%u, qid:%u), re-check in %dus\n",
				     ctrlr->cntlid, qpair->qid, DUPLICATE_QID_RETRY_US);
			qpair->connect_req = req;
			/* Set qpair->ctrlr here so that we'll have it when the poller expires. */
			nvmf_qpair_set_ctrlr(qpair, ctrlr);
			req->poller = SPDK_POLLER_REGISTER(_retry_qid_check, qpair,
							   DUPLICATE_QID_RETRY_US);
		}
		return;
	}

	SPDK_DTRACE_PROBE4_TICKS(nvmf_ctrlr_add_qpair, qpair, qpair->qid, ctrlr->subsys->subnqn,
				 ctrlr->hostnqn);
	nvmf_qpair_set_ctrlr(qpair, ctrlr);
	spdk_bit_array_set(ctrlr->qpair_mask, qpair->qid);
	SPDK_DEBUGLOG(nvmf, "qpair_mask set, qid %u\n", qpair->qid);

	spdk_thread_send_msg(qpair->group->thread, nvmf_ctrlr_send_connect_rsp, req);
}

static int
_retry_qid_check(void *ctx)
{
	struct spdk_nvmf_qpair *qpair = ctx;
	struct spdk_nvmf_request *req = qpair->connect_req;
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;

	spdk_poller_unregister(&req->poller);
	SPDK_WARNLOG("Retrying adding qpair, qid:%d\n", qpair->qid);
	nvmf_ctrlr_add_qpair(qpair, ctrlr, req);
	return SPDK_POLLER_BUSY;
}

static void
_nvmf_ctrlr_add_admin_qpair(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	ctrlr->admin_qpair = qpair;
	ctrlr->association_timeout = qpair->transport->opts.association_timeout;
	nvmf_ctrlr_start_keep_alive_timer(ctrlr);
	nvmf_ctrlr_add_qpair(qpair, ctrlr, req);
}

static void
_nvmf_subsystem_add_ctrlr(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	if (nvmf_subsystem_add_ctrlr(ctrlr->subsys, ctrlr)) {
		SPDK_ERRLOG("Unable to add controller to subsystem\n");
		spdk_bit_array_free(&ctrlr->qpair_mask);
		free(ctrlr);
		qpair->ctrlr = NULL;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		spdk_nvmf_request_complete(req);
		return;
	}

	spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_add_admin_qpair, req);
}

static void
nvmf_ctrlr_cdata_init(struct spdk_nvmf_transport *transport, struct spdk_nvmf_subsystem *subsystem,
		      struct spdk_nvmf_ctrlr_data *cdata)
{
	cdata->aerl = SPDK_NVMF_MAX_ASYNC_EVENTS - 1;
	cdata->kas = KAS_DEFAULT_VALUE;
	cdata->vid = SPDK_PCI_VID_INTEL;
	cdata->ssvid = SPDK_PCI_VID_INTEL;
	/* INTEL OUI */
	cdata->ieee[0] = 0xe4;
	cdata->ieee[1] = 0xd2;
	cdata->ieee[2] = 0x5c;
	cdata->oncs.compare = 1;
	cdata->oncs.dsm = 1;
	cdata->oncs.write_zeroes = 1;
	cdata->oncs.reservations = 1;
	cdata->oncs.copy = 1;
	cdata->fuses.compare_and_write = 1;
	cdata->sgls.supported = 1;
	cdata->sgls.keyed_sgl = 1;
	cdata->sgls.sgl_offset = 1;
	cdata->nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
	cdata->nvmf_specific.ioccsz += transport->opts.in_capsule_data_size / 16;
	cdata->nvmf_specific.iorcsz = sizeof(struct spdk_nvme_cpl) / 16;
	cdata->nvmf_specific.icdoff = 0; /* offset starts directly after SQE */
	cdata->nvmf_specific.ctrattr.ctrlr_model = SPDK_NVMF_CTRLR_MODEL_DYNAMIC;
	cdata->nvmf_specific.msdbd = 1;

	if (transport->ops->cdata_init) {
		transport->ops->cdata_init(transport, subsystem, cdata);
	}
}
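
/*
 * IOCCSZ/IORCSZ math above, worked through with an assumed in-capsule data
 * size of 4096 bytes: the SQE is 64 bytes, so ioccsz = 64/16 + 4096/16 = 260
 * (16-byte units). The 16-byte CQE always yields iorcsz = 1.
 */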

static bool
nvmf_subsystem_has_zns_iocs(struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_ns *ns;
	uint32_t i;

	for (i = 0; i < subsystem->max_nsid; i++) {
		ns = subsystem->ns[i];
		if (ns && ns->bdev && spdk_bdev_is_zoned(ns->bdev)) {
			return true;
		}
	}
	return false;
}

static void
nvmf_ctrlr_init_visible_ns(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvmf_ns *ns;

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		if (ns->always_visible || nvmf_ns_find_host(ns, ctrlr->hostnqn) != NULL) {
			spdk_bit_array_set(ctrlr->visible_ns, ns->nsid - 1);
		}
	}
}

static struct spdk_nvmf_ctrlr *
nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
		  struct spdk_nvmf_request *req,
		  struct spdk_nvmf_fabric_connect_cmd *connect_cmd,
		  struct spdk_nvmf_fabric_connect_data *connect_data)
{
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_transport *transport = req->qpair->transport;
	struct spdk_nvme_transport_id listen_trid = {};
	bool subsys_has_multi_iocs = false;

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		SPDK_ERRLOG("Memory allocation failed\n");
		return NULL;
	}

	if (spdk_nvme_trtype_is_fabrics(transport->ops->type)) {
		ctrlr->dynamic_ctrlr = true;
	} else {
		ctrlr->cntlid = connect_data->cntlid;
	}

	SPDK_DTRACE_PROBE3_TICKS(nvmf_ctrlr_create, ctrlr, subsystem->subnqn,
				 spdk_thread_get_id(req->qpair->group->thread));

	STAILQ_INIT(&ctrlr->async_events);
	TAILQ_INIT(&ctrlr->log_head);
	ctrlr->subsys = subsystem;
	ctrlr->thread = req->qpair->group->thread;
	ctrlr->disconnect_in_progress = false;

	ctrlr->qpair_mask = spdk_bit_array_create(transport->opts.max_qpairs_per_ctrlr);
	if (!ctrlr->qpair_mask) {
		SPDK_ERRLOG("Failed to allocate controller qpair mask\n");
		goto err_qpair_mask;
	}

	nvmf_ctrlr_cdata_init(transport, subsystem, &ctrlr->cdata);

	/*
	 * KAS: This field indicates the granularity of the Keep Alive Timer in 100ms units.
	 * If this field is cleared to 0h, then Keep Alive is not supported.
	 */
	if (ctrlr->cdata.kas) {
		ctrlr->feat.keep_alive_timer.bits.kato = spdk_divide_round_up(connect_cmd->kato,
				KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS) *
				KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS;
	}
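
	/*
	 * Example of the rounding above: KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS
	 * is 10000 ms, so a Connect KATO of 12000 ms is rounded up to 20000 ms.
	 */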

	ctrlr->feat.async_event_configuration.bits.ns_attr_notice = 1;
	if (ctrlr->subsys->flags.ana_reporting) {
		ctrlr->feat.async_event_configuration.bits.ana_change_notice = 1;
	}
	ctrlr->feat.volatile_write_cache.bits.wce = 1;
	/* Coalescing Disable */
	ctrlr->feat.interrupt_vector_configuration.bits.cd = 1;

	if (spdk_nvmf_subsystem_is_discovery(ctrlr->subsys)) {
		/*
		 * If keep-alive timeout is not set, discovery controllers use some
		 * arbitrary high value in order to cleanup stale discovery sessions
		 *
		 * From the 1.0a nvme-of spec:
		 * "The Keep Alive command is reserved for
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 *
		 * From the 1.1 nvme-of spec:
		 * "A host requests an explicit persistent connection
		 * to a Discovery controller and Asynchronous Event Notifications from
		 * the Discovery controller on that persistent connection by specifying
		 * a non-zero Keep Alive Timer value in the Connect command."
		 *
		 * If a non-zero KATO is used, we enable discovery_log_change_notice;
		 * otherwise we disable it and use the default discovery controller KATO.
		 * KATO is in milliseconds.
		 */
		if (ctrlr->feat.keep_alive_timer.bits.kato == 0) {
			ctrlr->feat.keep_alive_timer.bits.kato = NVMF_DISC_KATO_IN_MS;
			ctrlr->feat.async_event_configuration.bits.discovery_log_change_notice = 0;
		} else {
			ctrlr->feat.async_event_configuration.bits.discovery_log_change_notice = 1;
		}
	}

	/* Subtract 1 for admin queue, 1 for 0's based */
	ctrlr->feat.number_of_queues.bits.ncqr = transport->opts.max_qpairs_per_ctrlr - 1 - 1;
	ctrlr->feat.number_of_queues.bits.nsqr = transport->opts.max_qpairs_per_ctrlr - 1 - 1;

	spdk_uuid_copy(&ctrlr->hostid, (struct spdk_uuid *)connect_data->hostid);
	memcpy(ctrlr->hostnqn, connect_data->hostnqn, SPDK_NVMF_NQN_MAX_LEN);

	ctrlr->visible_ns = spdk_bit_array_create(subsystem->max_nsid);
	if (!ctrlr->visible_ns) {
		SPDK_ERRLOG("Failed to allocate visible namespace array\n");
		goto err_visible_ns;
	}
	nvmf_ctrlr_init_visible_ns(ctrlr);

	ctrlr->vcprop.cap.raw = 0;
	ctrlr->vcprop.cap.bits.cqr = 1; /* NVMe-oF specification required */
	ctrlr->vcprop.cap.bits.mqes = transport->opts.max_queue_depth - 1; /* max queue depth */
	ctrlr->vcprop.cap.bits.ams = 0; /* optional arb mechanisms */
	/* ready timeout - 500 msec units */
	ctrlr->vcprop.cap.bits.to = NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500;
	ctrlr->vcprop.cap.bits.dstrd = 0; /* fixed to 0 for NVMe-oF */
	subsys_has_multi_iocs = nvmf_subsystem_has_zns_iocs(subsystem);
	if (subsys_has_multi_iocs) {
		ctrlr->vcprop.cap.bits.css =
			SPDK_NVME_CAP_CSS_IOCS; /* One or more I/O command sets supported */
	} else {
		ctrlr->vcprop.cap.bits.css = SPDK_NVME_CAP_CSS_NVM; /* NVM command set */
	}

	ctrlr->vcprop.cap.bits.mpsmin = 0; /* 2 ^ (12 + mpsmin) == 4k */
	ctrlr->vcprop.cap.bits.mpsmax = 0; /* 2 ^ (12 + mpsmax) == 4k */

	/* Version Supported: 1.3 */
	ctrlr->vcprop.vs.bits.mjr = 1;
	ctrlr->vcprop.vs.bits.mnr = 3;
	ctrlr->vcprop.vs.bits.ter = 0;

	ctrlr->vcprop.cc.raw = 0;
	ctrlr->vcprop.cc.bits.en = 0; /* Init controller disabled */
	if (subsys_has_multi_iocs) {
		ctrlr->vcprop.cc.bits.css =
			SPDK_NVME_CC_CSS_IOCS; /* All supported I/O Command Sets */
	}

	ctrlr->vcprop.csts.raw = 0;
	ctrlr->vcprop.csts.bits.rdy = 0; /* Init controller as not ready */

	SPDK_DEBUGLOG(nvmf, "cap 0x%" PRIx64 "\n", ctrlr->vcprop.cap.raw);
	SPDK_DEBUGLOG(nvmf, "vs 0x%x\n", ctrlr->vcprop.vs.raw);
	SPDK_DEBUGLOG(nvmf, "cc 0x%x\n", ctrlr->vcprop.cc.raw);
	SPDK_DEBUGLOG(nvmf, "csts 0x%x\n", ctrlr->vcprop.csts.raw);

	ctrlr->dif_insert_or_strip = transport->opts.dif_insert_or_strip;

	if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_NVME) {
		if (spdk_nvmf_qpair_get_listen_trid(req->qpair, &listen_trid) != 0) {
			SPDK_ERRLOG("Could not get listener transport ID\n");
			goto err_listener;
		}

		ctrlr->listener = nvmf_subsystem_find_listener(ctrlr->subsys, &listen_trid);
		if (!ctrlr->listener) {
			SPDK_ERRLOG("Listener was not found\n");
			goto err_listener;
		}
	}

	nvmf_qpair_set_ctrlr(req->qpair, ctrlr);
	spdk_thread_send_msg(subsystem->thread, _nvmf_subsystem_add_ctrlr, req);

	return ctrlr;
err_listener:
	spdk_bit_array_free(&ctrlr->visible_ns);
err_visible_ns:
	spdk_bit_array_free(&ctrlr->qpair_mask);
err_qpair_mask:
	free(ctrlr);
	return NULL;
}

static void
_nvmf_ctrlr_destruct(void *ctx)
{
	struct spdk_nvmf_ctrlr *ctrlr = ctx;
	struct spdk_nvmf_reservation_log *log, *log_tmp;
	struct spdk_nvmf_async_event_completion *event, *event_tmp;

	SPDK_DTRACE_PROBE3_TICKS(nvmf_ctrlr_destruct, ctrlr, ctrlr->subsys->subnqn,
				 spdk_thread_get_id(ctrlr->thread));

	assert(spdk_get_thread() == ctrlr->thread);
	assert(ctrlr->in_destruct);

	SPDK_DEBUGLOG(nvmf, "Destroy ctrlr 0x%hx\n", ctrlr->cntlid);
	if (ctrlr->disconnect_in_progress) {
		SPDK_ERRLOG("freeing ctrlr with disconnect in progress\n");
		spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_destruct, ctrlr);
		return;
	}

	nvmf_ctrlr_stop_keep_alive_timer(ctrlr);
	nvmf_ctrlr_stop_association_timer(ctrlr);
	spdk_bit_array_free(&ctrlr->qpair_mask);

	TAILQ_FOREACH_SAFE(log, &ctrlr->log_head, link, log_tmp) {
		TAILQ_REMOVE(&ctrlr->log_head, log, link);
		free(log);
	}
	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
		free(event);
	}
	spdk_bit_array_free(&ctrlr->visible_ns);
	free(ctrlr);
}

void
nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
{
	nvmf_subsystem_remove_ctrlr(ctrlr->subsys, ctrlr);

	spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_destruct, ctrlr);
}

static void
nvmf_ctrlr_add_io_qpair(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
	struct spdk_nvmf_qpair *admin_qpair = ctrlr->admin_qpair;
	struct spdk_nvmf_poll_group *admin_qpair_group = NULL;
	enum spdk_nvmf_qpair_state admin_qpair_state = SPDK_NVMF_QPAIR_UNINITIALIZED;
	bool admin_qpair_active = false;

	SPDK_DTRACE_PROBE4_TICKS(nvmf_ctrlr_add_io_qpair, ctrlr, req->qpair, req->qpair->qid,
				 spdk_thread_get_id(ctrlr->thread));

	/* Unit test will check qpair->ctrlr after calling spdk_nvmf_ctrlr_connect.
	 * In the error case the value should be NULL, so set it to NULL first.
	 */
	qpair->ctrlr = NULL;

	/* Make sure the controller is not being destroyed. */
	if (ctrlr->in_destruct) {
		SPDK_ERRLOG("Got I/O connect while ctrlr was being destroyed.\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	if (spdk_nvmf_subsystem_is_discovery(ctrlr->subsys)) {
		SPDK_ERRLOG("I/O connect not allowed on discovery controller\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	if (!ctrlr->vcprop.cc.bits.en) {
		SPDK_ERRLOG("Got I/O connect before ctrlr was enabled\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	if (1u << ctrlr->vcprop.cc.bits.iosqes != sizeof(struct spdk_nvme_cmd)) {
		SPDK_ERRLOG("Got I/O connect with invalid IOSQES %u\n",
			    ctrlr->vcprop.cc.bits.iosqes);
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	if (1u << ctrlr->vcprop.cc.bits.iocqes != sizeof(struct spdk_nvme_cpl)) {
		SPDK_ERRLOG("Got I/O connect with invalid IOCQES %u\n",
			    ctrlr->vcprop.cc.bits.iocqes);
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}
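
	/*
	 * Only the fabrics-mandated entry sizes pass the two checks above:
	 * IOSQES = 6 (1 << 6 == 64 == sizeof(struct spdk_nvme_cmd)) and
	 * IOCQES = 4 (1 << 4 == 16 == sizeof(struct spdk_nvme_cpl)).
	 */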

	/* There is a chance that the admin qpair was destroyed. This is an issue
	 * that was observed only with ESX initiators. */
	if (admin_qpair) {
		admin_qpair_active = spdk_nvmf_qpair_is_active(admin_qpair);
		admin_qpair_group = admin_qpair->group;
		admin_qpair_state = admin_qpair->state;
	}

	if (!admin_qpair_active || admin_qpair_group == NULL) {
		/* The admin qpair may have been destroyed, or may be getting destroyed
		 * right now, e.g. due to an expired keep-alive timer. Part of qpair
		 * destruction is changing the qpair's state to DEACTIVATING and
		 * removing it from its poll group. */
		SPDK_ERRLOG("Inactive admin qpair (state %d, group %p)\n", admin_qpair_state, admin_qpair_group);
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	/* check if we would exceed ctrlr connection limit */
	if (qpair->qid >= spdk_bit_array_capacity(ctrlr->qpair_mask)) {
		SPDK_ERRLOG("Requested QID %u but Max QID is %u\n",
			    qpair->qid, spdk_bit_array_capacity(ctrlr->qpair_mask) - 1);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER;
		goto end;
	}

	nvmf_ctrlr_add_qpair(qpair, ctrlr, req);
	return;
end:
	spdk_nvmf_request_complete(req);
}

static void
_nvmf_ctrlr_add_io_qpair(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_fabric_connect_data *data;
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_qpair *admin_qpair;
	struct spdk_nvmf_tgt *tgt = qpair->transport->tgt;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvme_transport_id listen_trid = {};
	const struct spdk_nvmf_subsystem_listener *listener;
	struct spdk_nvmf_poll_group *admin_qpair_group = NULL;
	enum spdk_nvmf_qpair_state admin_qpair_state = SPDK_NVMF_QPAIR_UNINITIALIZED;
	bool admin_qpair_active = false;

	assert(req->iovcnt == 1);

	data = req->iov[0].iov_base;

	SPDK_DEBUGLOG(nvmf, "Connect I/O Queue for controller id 0x%x\n", data->cntlid);

	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, data->subnqn);
	/* We already checked this in spdk_nvmf_ctrlr_connect */
	assert(subsystem != NULL);

	ctrlr = nvmf_subsystem_get_ctrlr(subsystem, data->cntlid);
	if (ctrlr == NULL) {
		SPDK_ERRLOG("Unknown controller ID 0x%x\n", data->cntlid);
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, cntlid);
		spdk_nvmf_request_complete(req);
		return;
	}

	/* fail before passing a message to the controller thread. */
	if (ctrlr->in_destruct) {
		SPDK_ERRLOG("Got I/O connect while ctrlr was being destroyed.\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		spdk_nvmf_request_complete(req);
		return;
	}

	/* If ANA reporting is enabled, check if I/O connect is on the same listener. */
	if (subsystem->flags.ana_reporting) {
		if (spdk_nvmf_qpair_get_listen_trid(req->qpair, &listen_trid) != 0) {
			SPDK_ERRLOG("Could not get listener transport ID\n");
			SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
			spdk_nvmf_request_complete(req);
			return;
		}

		listener = nvmf_subsystem_find_listener(subsystem, &listen_trid);
		if (listener != ctrlr->listener) {
			SPDK_ERRLOG("I/O connect is on a listener different from admin connect\n");
			SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
			spdk_nvmf_request_complete(req);
			return;
		}
	}

	admin_qpair = ctrlr->admin_qpair;

	/* There is a chance that the admin qpair was destroyed. This is an issue
	 * that was observed only with ESX initiators. */
	if (admin_qpair) {
		admin_qpair_active = spdk_nvmf_qpair_is_active(admin_qpair);
		admin_qpair_group = admin_qpair->group;
		admin_qpair_state = admin_qpair->state;
	}

	if (!admin_qpair_active || admin_qpair_group == NULL) {
		/* The admin qpair may have been destroyed, or may be getting destroyed
		 * right now, e.g. due to an expired keep-alive timer. Part of qpair
		 * destruction is changing the qpair's state to DEACTIVATING and
		 * removing it from its poll group. */
		SPDK_ERRLOG("Inactive admin qpair (state %d, group %p)\n", admin_qpair_state, admin_qpair_group);
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		spdk_nvmf_request_complete(req);
		return;
	}
	qpair->ctrlr = ctrlr;
	spdk_thread_send_msg(admin_qpair_group->thread, nvmf_ctrlr_add_io_qpair, req);
}

static bool
nvmf_qpair_access_allowed(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_subsystem *subsystem,
			  const char *hostnqn)
{
	struct spdk_nvme_transport_id listen_trid = {};

	if (!spdk_nvmf_subsystem_host_allowed(subsystem, hostnqn)) {
		SPDK_ERRLOG("Subsystem '%s' does not allow host '%s'\n", subsystem->subnqn, hostnqn);
		return false;
	}

	if (spdk_nvmf_qpair_get_listen_trid(qpair, &listen_trid)) {
		SPDK_ERRLOG("Subsystem '%s' is unable to enforce access control due to an internal error.\n",
			    subsystem->subnqn);
		return false;
	}

	if (!spdk_nvmf_subsystem_listener_allowed(subsystem, &listen_trid)) {
		SPDK_ERRLOG("Subsystem '%s' does not allow host '%s' to connect at this address.\n",
			    subsystem->subnqn, hostnqn);
		return false;
	}

	return true;
}

static int
_nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_data *data = req->iov[0].iov_base;
	struct spdk_nvmf_fabric_connect_cmd *cmd = &req->cmd->connect_cmd;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_transport *transport = qpair->transport;
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_subsystem *subsystem;

	SPDK_DEBUGLOG(nvmf, "recfmt 0x%x qid %u sqsize %u\n",
		      cmd->recfmt, cmd->qid, cmd->sqsize);

	SPDK_DEBUGLOG(nvmf, "Connect data:\n");
	SPDK_DEBUGLOG(nvmf, " cntlid: 0x%04x\n", data->cntlid);
	SPDK_DEBUGLOG(nvmf, " hostid: %08x-%04x-%04x-%02x%02x-%04x%08x ***\n",
		      ntohl(*(uint32_t *)&data->hostid[0]),
		      ntohs(*(uint16_t *)&data->hostid[4]),
		      ntohs(*(uint16_t *)&data->hostid[6]),
		      data->hostid[8],
		      data->hostid[9],
		      ntohs(*(uint16_t *)&data->hostid[10]),
		      ntohl(*(uint32_t *)&data->hostid[12]));
	SPDK_DEBUGLOG(nvmf, " subnqn: \"%s\"\n", data->subnqn);
	SPDK_DEBUGLOG(nvmf, " hostnqn: \"%s\"\n", data->hostnqn);

	subsystem = spdk_nvmf_tgt_find_subsystem(transport->tgt, data->subnqn);
	if (!subsystem) {
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (cmd->recfmt != 0) {
		SPDK_ERRLOG("Connect command unsupported RECFMT %u\n", cmd->recfmt);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/*
	 * SQSIZE is a 0-based value, so it must be at least 1 (minimum queue depth is 2) and
	 * strictly less than max_aq_depth (admin queues) or max_queue_depth (io queues).
	 */
	if (cmd->sqsize == 0) {
		SPDK_ERRLOG("Invalid SQSIZE = 0\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (cmd->qid == 0) {
		if (cmd->sqsize >= transport->opts.max_aq_depth) {
			SPDK_ERRLOG("Invalid SQSIZE for admin queue %u (min 1, max %u)\n",
				    cmd->sqsize, transport->opts.max_aq_depth - 1);
			SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
	} else if (cmd->sqsize >= transport->opts.max_queue_depth) {
		SPDK_ERRLOG("Invalid SQSIZE %u (min 1, max %u)\n",
			    cmd->sqsize, transport->opts.max_queue_depth - 1);
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
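
	/*
	 * SQSIZE example: the field is 0-based, so SQSIZE = 127 requests a queue
	 * depth of 128, which is the largest value accepted for an I/O queue when
	 * max_queue_depth is 128.
	 */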

	qpair->sq_head_max = cmd->sqsize;
	qpair->qid = cmd->qid;
	qpair->connect_received = true;

	pthread_mutex_lock(&qpair->group->mutex);
	qpair->group->current_unassociated_qpairs--;
	pthread_mutex_unlock(&qpair->group->mutex);

	if (0 == qpair->qid) {
		qpair->group->stat.admin_qpairs++;
		qpair->group->stat.current_admin_qpairs++;
	} else {
		qpair->group->stat.io_qpairs++;
		qpair->group->stat.current_io_qpairs++;
	}

	if (cmd->qid == 0) {
		SPDK_DEBUGLOG(nvmf, "Connect Admin Queue for controller ID 0x%x\n", data->cntlid);

		if (spdk_nvme_trtype_is_fabrics(transport->ops->type) && data->cntlid != 0xFFFF) {
			/* This NVMf target only supports dynamic mode. */
			SPDK_ERRLOG("The NVMf target only supports dynamic mode (CNTLID = 0x%x).\n", data->cntlid);
			SPDK_NVMF_INVALID_CONNECT_DATA(rsp, cntlid);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		/* Establish a new ctrlr */
		ctrlr = nvmf_ctrlr_create(subsystem, req, cmd, data);
		if (!ctrlr) {
			SPDK_ERRLOG("nvmf_ctrlr_create() failed\n");
			rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		} else {
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
	} else {
		spdk_thread_send_msg(subsystem->thread, _nvmf_ctrlr_add_io_qpair, req);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	}
}

static struct spdk_nvmf_subsystem_poll_group *
nvmf_subsystem_pg_from_connect_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_data *data;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_tgt *tgt;

	assert(nvmf_request_is_fabric_connect(req));
	assert(req->qpair->ctrlr == NULL);
	assert(req->iovcnt == 1);

	data = req->iov[0].iov_base;
	tgt = req->qpair->transport->tgt;

	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, data->subnqn);
	if (subsystem == NULL) {
		return NULL;
	}

	return &req->qpair->group->sgroups[subsystem->id];
}

int
spdk_nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	enum spdk_nvmf_request_exec_status status;

	if (req->iovcnt > 1) {
		SPDK_ERRLOG("Connect command invalid iovcnt: %d\n", req->iovcnt);
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		goto out;
	}

	sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
	if (!sgroup) {
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn);
		status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		goto out;
	}

	sgroup->mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);

	status = _nvmf_ctrlr_connect(req);

out:
	if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		_nvmf_request_complete(req);
	}

	return status;
}

static int
nvmf_ctrlr_cmd_connect(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_data *data = req->iov[0].iov_base;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_transport *transport = req->qpair->transport;
	struct spdk_nvmf_subsystem *subsystem;

	if (req->length < sizeof(struct spdk_nvmf_fabric_connect_data)) {
		SPDK_ERRLOG("Connect command data length 0x%x too small\n", req->length);
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->iovcnt > 1) {
		SPDK_ERRLOG("Connect command invalid iovcnt: %d\n", req->iovcnt);
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	subsystem = spdk_nvmf_tgt_find_subsystem(transport->tgt, data->subnqn);
	if (!subsystem) {
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if ((subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE) ||
	    (subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSING) ||
	    (subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED) ||
	    (subsystem->state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING)) {
		struct spdk_nvmf_subsystem_poll_group *sgroup;

		/* Subsystem is not ready to handle a connect. Decrement
		 * the mgmt_io_outstanding to avoid the subsystem waiting
		 * for this command to complete before unpausing. Queued
		 * requests get retried when the subsystem resumes.
		 */
		sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
		assert(sgroup != NULL);
		sgroup->mgmt_io_outstanding--;
		TAILQ_REMOVE(&req->qpair->outstanding, req, link);
		TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
		SPDK_DEBUGLOG(nvmf, "Subsystem '%s' is not ready for connect, retrying...\n", subsystem->subnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	}

	/* Ensure that hostnqn is null terminated */
	if (!memchr(data->hostnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1)) {
		SPDK_ERRLOG("Connect HOSTNQN is not null terminated\n");
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, hostnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (!nvmf_qpair_access_allowed(req->qpair, subsystem, data->hostnqn)) {
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_HOST;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return _nvmf_ctrlr_connect(req);
}

static int
nvmf_ctrlr_association_remove(void *ctx)
{
	struct spdk_nvmf_ctrlr *ctrlr = ctx;
	int rc;

	nvmf_ctrlr_stop_association_timer(ctrlr);

	if (ctrlr->in_destruct) {
		return SPDK_POLLER_IDLE;
	}
	SPDK_DEBUGLOG(nvmf, "Disconnecting host from subsystem %s due to association timeout.\n",
		      ctrlr->subsys->subnqn);

	if (ctrlr->admin_qpair) {
		rc = spdk_nvmf_qpair_disconnect(ctrlr->admin_qpair);
		if (rc < 0 && rc != -EINPROGRESS) {
			SPDK_ERRLOG("Fail to disconnect admin ctrlr qpair\n");
			assert(false);
		}
	}

	return SPDK_POLLER_BUSY;
}

static int
_nvmf_ctrlr_cc_reset_shn_done(void *ctx)
{
	struct spdk_nvmf_ctrlr *ctrlr = ctx;
	uint64_t now = spdk_get_ticks();
	uint32_t count;

	if (ctrlr->cc_timer) {
		spdk_poller_unregister(&ctrlr->cc_timer);
	}

	count = spdk_bit_array_count_set(ctrlr->qpair_mask);
	SPDK_DEBUGLOG(nvmf, "ctrlr %p active queue count %u\n", ctrlr, count);

	if (count > 1) {
		if (now < ctrlr->cc_timeout_tsc) {
			/* restart cc timer */
			ctrlr->cc_timer = SPDK_POLLER_REGISTER(_nvmf_ctrlr_cc_reset_shn_done, ctrlr, 100 * 1000);
			return SPDK_POLLER_IDLE;
		} else {
			/* controller fatal status */
			SPDK_WARNLOG("IO timeout, ctrlr %p is in fatal status\n", ctrlr);
			nvmf_ctrlr_set_fatal_status(ctrlr);
		}
	}

	spdk_poller_unregister(&ctrlr->cc_timeout_timer);

	if (ctrlr->disconnect_is_shn) {
		ctrlr->vcprop.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
		ctrlr->disconnect_is_shn = false;
	} else {
		/* Only a subset of the registers are cleared out on a reset */
		ctrlr->vcprop.cc.raw = 0;
		ctrlr->vcprop.csts.raw = 0;
	}

	/* After CC.EN transitions to 0 (due to shutdown or reset), the association
	 * between the host and controller shall be preserved for at least 2 minutes */
	if (ctrlr->association_timer) {
		SPDK_DEBUGLOG(nvmf, "Association timer already set\n");
		nvmf_ctrlr_stop_association_timer(ctrlr);
	}
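	/*
	 * association_timeout is in milliseconds (the poller period below is in
	 * microseconds). E.g., a 120000 ms timeout preserves the association for
	 * the 2 minutes required above before nvmf_ctrlr_association_remove()
	 * disconnects the admin qpair.
	 */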
	if (ctrlr->association_timeout) {
		ctrlr->association_timer = SPDK_POLLER_REGISTER(nvmf_ctrlr_association_remove, ctrlr,
					   ctrlr->association_timeout * 1000);
	}
	ctrlr->disconnect_in_progress = false;
	return SPDK_POLLER_BUSY;
}

static void
nvmf_ctrlr_cc_reset_shn_done(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_nvmf_ctrlr *ctrlr = spdk_io_channel_iter_get_ctx(i);

	if (status < 0) {
		SPDK_ERRLOG("Fail to disconnect io ctrlr qpairs\n");
		assert(false);
	}

	_nvmf_ctrlr_cc_reset_shn_done((void *)ctrlr);
}

static void
nvmf_bdev_complete_reset(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	SPDK_NOTICELOG("Resetting bdev done with %s\n", success ? "success" : "failure");

	spdk_bdev_free_io(bdev_io);
}

static int
nvmf_ctrlr_cc_timeout(void *ctx)
{
	struct spdk_nvmf_ctrlr *ctrlr = ctx;
	struct spdk_nvmf_poll_group *group;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;

	spdk_poller_unregister(&ctrlr->cc_timeout_timer);
	SPDK_DEBUGLOG(nvmf, "Ctrlr %p reset or shutdown timeout\n", ctrlr);

	if (!ctrlr->admin_qpair) {
		SPDK_NOTICELOG("Ctrlr %p admin qpair disconnected\n", ctrlr);
		return SPDK_POLLER_IDLE;
	}

	group = ctrlr->admin_qpair->group;
	assert(group != NULL && group->sgroups != NULL);

	for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) {
		if (ns->bdev == NULL) {
			continue;
		}
		ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[ns->opts.nsid - 1];
		SPDK_NOTICELOG("Ctrlr %p resetting NSID %u\n", ctrlr, ns->opts.nsid);
		spdk_bdev_reset(ns->desc, ns_info->channel, nvmf_bdev_complete_reset, NULL);
	}

	return SPDK_POLLER_BUSY;
}

const struct spdk_nvmf_registers *
spdk_nvmf_ctrlr_get_regs(struct spdk_nvmf_ctrlr *ctrlr)
{
	return &ctrlr->vcprop;
}

void
nvmf_ctrlr_set_fatal_status(struct spdk_nvmf_ctrlr *ctrlr)
{
	ctrlr->vcprop.csts.bits.cfs = 1;
}

static uint64_t
nvmf_prop_get_cap(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.cap.raw;
}

static uint64_t
nvmf_prop_get_vs(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.vs.raw;
}

static uint64_t
nvmf_prop_get_cc(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.cc.raw;
}

static bool
nvmf_prop_set_cc(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	union spdk_nvme_cc_register cc, diff;
	uint32_t cc_timeout_ms;

	cc.raw = value;

	SPDK_DEBUGLOG(nvmf, "cur CC: 0x%08x\n", ctrlr->vcprop.cc.raw);
	SPDK_DEBUGLOG(nvmf, "new CC: 0x%08x\n", cc.raw);

	/*
	 * Calculate which bits changed between the current and new CC.
	 * Mark each bit as 0 once it is handled to determine if any unhandled bits were changed.
	 */
	diff.raw = cc.raw ^ ctrlr->vcprop.cc.raw;
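
	/*
	 * Example: a host enabling the controller flips only CC.EN, so only
	 * diff.bits.en is set; it is cleared below once handled. Any bit still
	 * set in diff.raw at the end was an unhandled (reserved) change and
	 * triggers the SPDK_ERRLOG at the bottom of this function.
	 */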

	if (diff.bits.en) {
		if (cc.bits.en) {
			SPDK_DEBUGLOG(nvmf, "Property Set CC Enable!\n");
			nvmf_ctrlr_stop_association_timer(ctrlr);

			ctrlr->vcprop.cc.bits.en = 1;
			ctrlr->vcprop.csts.bits.rdy = 1;
		} else {
			SPDK_DEBUGLOG(nvmf, "Property Set CC Disable!\n");
			if (ctrlr->disconnect_in_progress) {
				SPDK_DEBUGLOG(nvmf, "Disconnect in progress\n");
				return true;
			}

			ctrlr->cc_timeout_timer = SPDK_POLLER_REGISTER(nvmf_ctrlr_cc_timeout, ctrlr,
						  NVMF_CC_RESET_SHN_TIMEOUT_IN_MS * 1000);
			/* Choose cc_timeout_ms between the cc_timeout_timer period and the
			 * host's reset/shutdown timeout */
			cc_timeout_ms = (NVMF_CC_RESET_SHN_TIMEOUT_IN_MS + NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS) / 2;
			ctrlr->cc_timeout_tsc = spdk_get_ticks() + cc_timeout_ms * spdk_get_ticks_hz() / (uint64_t)1000;

			ctrlr->vcprop.cc.bits.en = 0;
			ctrlr->disconnect_in_progress = true;
			ctrlr->disconnect_is_shn = false;
			spdk_for_each_channel(ctrlr->subsys->tgt,
					      nvmf_ctrlr_disconnect_io_qpairs_on_pg,
					      ctrlr,
					      nvmf_ctrlr_cc_reset_shn_done);
		}
		diff.bits.en = 0;
	}

	if (diff.bits.shn) {
		if (cc.bits.shn == SPDK_NVME_SHN_NORMAL ||
		    cc.bits.shn == SPDK_NVME_SHN_ABRUPT) {
			SPDK_DEBUGLOG(nvmf, "Property Set CC Shutdown %u%ub!\n",
				      cc.bits.shn >> 1, cc.bits.shn & 1);
			if (ctrlr->disconnect_in_progress) {
				SPDK_DEBUGLOG(nvmf, "Disconnect in progress\n");
				return true;
			}

			ctrlr->cc_timeout_timer = SPDK_POLLER_REGISTER(nvmf_ctrlr_cc_timeout, ctrlr,
						  NVMF_CC_RESET_SHN_TIMEOUT_IN_MS * 1000);
			/* Choose cc_timeout_ms between the cc_timeout_timer period and the
			 * host's reset/shutdown timeout */
			cc_timeout_ms = (NVMF_CC_RESET_SHN_TIMEOUT_IN_MS + NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS) / 2;
			ctrlr->cc_timeout_tsc = spdk_get_ticks() + cc_timeout_ms * spdk_get_ticks_hz() / (uint64_t)1000;

			ctrlr->vcprop.cc.bits.shn = cc.bits.shn;
			ctrlr->disconnect_in_progress = true;
			ctrlr->disconnect_is_shn = true;
			spdk_for_each_channel(ctrlr->subsys->tgt,
					      nvmf_ctrlr_disconnect_io_qpairs_on_pg,
					      ctrlr,
					      nvmf_ctrlr_cc_reset_shn_done);

			/* From the time a shutdown is initiated the controller shall disable
			 * Keep Alive timer */
			nvmf_ctrlr_stop_keep_alive_timer(ctrlr);
		} else if (cc.bits.shn == 0) {
			ctrlr->vcprop.cc.bits.shn = 0;
		} else {
			SPDK_ERRLOG("Prop Set CC: Invalid SHN value %u%ub\n",
				    cc.bits.shn >> 1, cc.bits.shn & 1);
			return false;
		}
		diff.bits.shn = 0;
	}

	if (diff.bits.iosqes) {
		SPDK_DEBUGLOG(nvmf, "Prop Set IOSQES = %u (%u bytes)\n",
			      cc.bits.iosqes, 1u << cc.bits.iosqes);
		ctrlr->vcprop.cc.bits.iosqes = cc.bits.iosqes;
		diff.bits.iosqes = 0;
	}

	if (diff.bits.iocqes) {
		SPDK_DEBUGLOG(nvmf, "Prop Set IOCQES = %u (%u bytes)\n",
			      cc.bits.iocqes, 1u << cc.bits.iocqes);
		ctrlr->vcprop.cc.bits.iocqes = cc.bits.iocqes;
		diff.bits.iocqes = 0;
	}

	if (diff.bits.ams) {
		SPDK_ERRLOG("Arbitration Mechanism Selected (AMS) 0x%x not supported!\n", cc.bits.ams);
		return false;
	}

	if (diff.bits.mps) {
		SPDK_ERRLOG("Memory Page Size (MPS) %u KiB not supported!\n", (1 << (2 + cc.bits.mps)));
		return false;
	}

	if (diff.bits.css) {
		if (cc.bits.css > SPDK_NVME_CC_CSS_IOCS) {
			SPDK_ERRLOG("I/O Command Set Selected (CSS) 0x%x not supported!\n",
				    cc.bits.css);
			return false;
		}
		diff.bits.css = 0;
	}

	if (diff.raw != 0) {
		/* Print an error message, but don't fail the command in this case.
		 * If we did want to fail in this case, we'd need to ensure we acted
		 * on no other bits or the initiator gets confused. */
		SPDK_ERRLOG("Prop Set CC toggled reserved bits 0x%x!\n", diff.raw);
	}

	return true;
}

static uint64_t
nvmf_prop_get_csts(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.csts.raw;
}

static uint64_t
nvmf_prop_get_aqa(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.aqa.raw;
}

static bool
nvmf_prop_set_aqa(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	union spdk_nvme_aqa_register aqa;

	aqa.raw = value;

	/*
	 * We don't need to explicitly check for maximum size, as the fields are
	 * limited to 12 bits (4096).
	 */
	if (aqa.bits.asqs < SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES - 1 ||
	    aqa.bits.acqs < SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES - 1 ||
	    aqa.bits.reserved1 != 0 || aqa.bits.reserved2 != 0) {
		return false;
	}

	ctrlr->vcprop.aqa.raw = value;

	return true;
}

static uint64_t
nvmf_prop_get_asq(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.asq;
}

static bool
nvmf_prop_set_asq_lower(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	ctrlr->vcprop.asq = (ctrlr->vcprop.asq & (0xFFFFFFFFULL << 32ULL)) | value;

	return true;
}

static bool
nvmf_prop_set_asq_upper(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	ctrlr->vcprop.asq = (ctrlr->vcprop.asq & 0xFFFFFFFFULL) | ((uint64_t)value << 32ULL);

	return true;
}

static uint64_t
nvmf_prop_get_acq(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.acq;
}

static bool
nvmf_prop_set_acq_lower(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	ctrlr->vcprop.acq = (ctrlr->vcprop.acq & (0xFFFFFFFFULL << 32ULL)) | value;

	return true;
}

static bool
nvmf_prop_set_acq_upper(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	ctrlr->vcprop.acq = (ctrlr->vcprop.acq & 0xFFFFFFFFULL) | ((uint64_t)value << 32ULL);

	return true;
}

struct nvmf_prop {
	uint32_t ofst;
	uint8_t size;
	char name[11];
	uint64_t (*get_cb)(struct spdk_nvmf_ctrlr *ctrlr);
	bool (*set_cb)(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value);
	bool (*set_upper_cb)(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value);
};

#define PROP(field, size, get_cb, set_cb, set_upper_cb) \
	{ \
		offsetof(struct spdk_nvme_registers, field), \
		size, \
		#field, \
		get_cb, set_cb, set_upper_cb \
	}

static const struct nvmf_prop nvmf_props[] = {
	PROP(cap, 8, nvmf_prop_get_cap, NULL, NULL),
	PROP(vs, 4, nvmf_prop_get_vs, NULL, NULL),
	PROP(cc, 4, nvmf_prop_get_cc, nvmf_prop_set_cc, NULL),
	PROP(csts, 4, nvmf_prop_get_csts, NULL, NULL),
	PROP(aqa, 4, nvmf_prop_get_aqa, nvmf_prop_set_aqa, NULL),
	PROP(asq, 8, nvmf_prop_get_asq, nvmf_prop_set_asq_lower, nvmf_prop_set_asq_upper),
	PROP(acq, 8, nvmf_prop_get_acq, nvmf_prop_set_acq_lower, nvmf_prop_set_acq_upper),
};

static const struct nvmf_prop *
find_prop(uint32_t ofst, uint8_t size)
{
	size_t i;

	for (i = 0; i < SPDK_COUNTOF(nvmf_props); i++) {
		const struct nvmf_prop *prop = &nvmf_props[i];

		if ((ofst >= prop->ofst) && (ofst + size <= prop->ofst + prop->size)) {
			return prop;
		}
	}

	return NULL;
}

static int
nvmf_property_get(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_fabric_prop_get_cmd *cmd = &req->cmd->prop_get_cmd;
	struct spdk_nvmf_fabric_prop_get_rsp *response = &req->rsp->prop_get_rsp;
	const struct nvmf_prop *prop;
	uint8_t size;

	response->status.sc = 0;
	response->value.u64 = 0;

	SPDK_DEBUGLOG(nvmf, "size %d, offset 0x%x\n",
		      cmd->attrib.size, cmd->ofst);

	switch (cmd->attrib.size) {
	case SPDK_NVMF_PROP_SIZE_4:
		size = 4;
		break;
	case SPDK_NVMF_PROP_SIZE_8:
		size = 8;
		break;
	default:
		SPDK_DEBUGLOG(nvmf, "Invalid size value %d\n", cmd->attrib.size);
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	prop = find_prop(cmd->ofst, size);
	if (prop == NULL || prop->get_cb == NULL) {
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	SPDK_DEBUGLOG(nvmf, "name: %s\n", prop->name);

	response->value.u64 = prop->get_cb(ctrlr);

	if (size != prop->size) {
		/* The size must be 4 and the prop->size is 8. Figure out which part of the property to read. */
		assert(size == 4);
		assert(prop->size == 8);

		if (cmd->ofst == prop->ofst) {
			/* Keep bottom 4 bytes only */
			response->value.u64 &= 0xFFFFFFFF;
		} else {
			/* Keep top 4 bytes only */
			response->value.u64 >>= 32;
		}
	}

	SPDK_DEBUGLOG(nvmf, "response value: 0x%" PRIx64 "\n", response->value.u64);

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
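
/*
 * A 4-byte access to an 8-byte property selects one dword by offset. For
 * example, a Property Get with size 4 at offset 0x4 matches CAP (ofst 0x0,
 * size 8) in find_prop(), and since the offsets differ, the value is shifted
 * right by 32 above to return the upper dword.
 */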

static int
nvmf_property_set(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_fabric_prop_set_cmd *cmd = &req->cmd->prop_set_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	const struct nvmf_prop *prop;
	uint64_t value;
	uint8_t size;
	bool ret;

	SPDK_DEBUGLOG(nvmf, "size %d, offset 0x%x, value 0x%" PRIx64 "\n",
		      cmd->attrib.size, cmd->ofst, cmd->value.u64);

	switch (cmd->attrib.size) {
	case SPDK_NVMF_PROP_SIZE_4:
		size = 4;
		break;
	case SPDK_NVMF_PROP_SIZE_8:
		size = 8;
		break;
	default:
		SPDK_DEBUGLOG(nvmf, "Invalid size value %d\n", cmd->attrib.size);
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	prop = find_prop(cmd->ofst, size);
	if (prop == NULL || prop->set_cb == NULL) {
		SPDK_INFOLOG(nvmf, "Invalid offset 0x%x\n", cmd->ofst);
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	SPDK_DEBUGLOG(nvmf, "name: %s\n", prop->name);

	value = cmd->value.u64;

	if (prop->size == 4) {
		ret = prop->set_cb(ctrlr, (uint32_t)value);
	} else if (size != prop->size) {
		/* The size must be 4 and the prop->size is 8. Figure out which part of the property to write. */
		assert(size == 4);
		assert(prop->size == 8);

		if (cmd->ofst == prop->ofst) {
			ret = prop->set_cb(ctrlr, (uint32_t)value);
		} else {
			ret = prop->set_upper_cb(ctrlr, (uint32_t)value);
		}
	} else {
		ret = prop->set_cb(ctrlr, (uint32_t)value);
		if (ret) {
			ret = prop->set_upper_cb(ctrlr, (uint32_t)(value >> 32));
		}
	}

	if (!ret) {
		SPDK_ERRLOG("prop set_cb failed\n");
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_arbitration(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

	SPDK_DEBUGLOG(nvmf, "Set Features - Arbitration (cdw11 = 0x%0x)\n", cmd->cdw11);

	ctrlr->feat.arbitration.raw = cmd->cdw11;
	ctrlr->feat.arbitration.bits.reserved = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_power_management(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(nvmf, "Set Features - Power Management (cdw11 = 0x%0x)\n", cmd->cdw11);

	/* Only PS = 0 is allowed, since we report NPSS = 0 */
	if (cmd->cdw11_bits.feat_power_management.bits.ps != 0) {
		SPDK_ERRLOG("Invalid power state %u\n", cmd->cdw11_bits.feat_power_management.bits.ps);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ctrlr->feat.power_management.raw = cmd->cdw11;
	ctrlr->feat.power_management.bits.reserved = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static bool
temp_threshold_opts_valid(const union spdk_nvme_feat_temperature_threshold *opts)
{
	/*
	 * Valid TMPSEL values:
	 * 0000b - 1000b: temperature sensors
	 * 1111b: set all implemented temperature sensors
	 */
	if (opts->bits.tmpsel >= 9 && opts->bits.tmpsel != 15) {
		/* 1001b - 1110b: reserved */
		SPDK_ERRLOG("Invalid TMPSEL %u\n", opts->bits.tmpsel);
		return false;
	}

	/*
	 * Valid THSEL values:
	 * 00b: over temperature threshold
	 * 01b: under temperature threshold
	 */
	if (opts->bits.thsel > 1) {
		/* 10b - 11b: reserved */
		SPDK_ERRLOG("Invalid THSEL %u\n", opts->bits.thsel);
		return false;
	}

	return true;
}
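
/*
 * Example: cdw11 with TMPSEL = 0 and THSEL = 0 targets the over temperature
 * threshold of sensor 0 and passes validation; TMPSEL values 9-14 are
 * reserved and rejected. Accepted thresholds are currently ignored since no
 * sensors are implemented (see the TODOs below).
 */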

static int
nvmf_ctrlr_set_features_temperature_threshold(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(nvmf, "Set Features - Temperature Threshold (cdw11 = 0x%0x)\n", cmd->cdw11);

	if (!temp_threshold_opts_valid(&cmd->cdw11_bits.feat_temp_threshold)) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* TODO: no sensors implemented - ignore new values */
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_get_features_temperature_threshold(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(nvmf, "Get Features - Temperature Threshold (cdw11 = 0x%0x)\n", cmd->cdw11);

	if (!temp_threshold_opts_valid(&cmd->cdw11_bits.feat_temp_threshold)) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* TODO: no sensors implemented - return 0 for all thresholds */
	rsp->cdw0 = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_get_features_interrupt_vector_configuration(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	union spdk_nvme_feat_interrupt_vector_configuration iv_conf = {};

	SPDK_DEBUGLOG(nvmf, "Get Features - Interrupt Vector Configuration (cdw11 = 0x%0x)\n", cmd->cdw11);

	iv_conf.bits.iv = cmd->cdw11_bits.feat_interrupt_vector_configuration.bits.iv;
	iv_conf.bits.cd = ctrlr->feat.interrupt_vector_configuration.bits.cd;
	rsp->cdw0 = iv_conf.raw;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_error_recovery(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(nvmf, "Set Features - Error Recovery (cdw11 = 0x%0x)\n", cmd->cdw11);

	if (cmd->cdw11_bits.feat_error_recovery.bits.dulbe) {
		/*
		 * Host is not allowed to set this bit, since we don't advertise it in
		 * Identify Namespace.
		 */
		SPDK_ERRLOG("Host set unsupported DULBE bit\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ctrlr->feat.error_recovery.raw = cmd->cdw11;
	ctrlr->feat.error_recovery.bits.reserved = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_volatile_write_cache(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

	SPDK_DEBUGLOG(nvmf, "Set Features - Volatile Write Cache (cdw11 = 0x%0x)\n", cmd->cdw11);

	ctrlr->feat.volatile_write_cache.raw = cmd->cdw11;
	ctrlr->feat.volatile_write_cache.bits.reserved = 0;

	SPDK_DEBUGLOG(nvmf, "Set Features - Volatile Write Cache %s\n",
		      ctrlr->feat.volatile_write_cache.bits.wce ? "Enabled" : "Disabled");
"Enabled" : "Disabled"); 1761 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1762 } 1763 1764 static int 1765 nvmf_ctrlr_set_features_write_atomicity(struct spdk_nvmf_request *req) 1766 { 1767 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1768 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1769 1770 SPDK_DEBUGLOG(nvmf, "Set Features - Write Atomicity (cdw11 = 0x%0x)\n", cmd->cdw11); 1771 1772 ctrlr->feat.write_atomicity.raw = cmd->cdw11; 1773 ctrlr->feat.write_atomicity.bits.reserved = 0; 1774 1775 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1776 } 1777 1778 static int 1779 nvmf_ctrlr_set_features_host_identifier(struct spdk_nvmf_request *req) 1780 { 1781 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 1782 1783 SPDK_ERRLOG("Set Features - Host Identifier not allowed\n"); 1784 response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR; 1785 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1786 } 1787 1788 static int 1789 nvmf_ctrlr_get_features_host_identifier(struct spdk_nvmf_request *req) 1790 { 1791 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1792 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1793 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 1794 struct spdk_iov_xfer ix; 1795 1796 SPDK_DEBUGLOG(nvmf, "Get Features - Host Identifier\n"); 1797 1798 if (!cmd->cdw11_bits.feat_host_identifier.bits.exhid) { 1799 /* NVMe over Fabrics requires EXHID=1 (128-bit/16-byte host ID) */ 1800 SPDK_ERRLOG("Get Features - Host Identifier with EXHID=0 not allowed\n"); 1801 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1802 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1803 } 1804 1805 if (req->iovcnt < 1 || req->length < sizeof(ctrlr->hostid)) { 1806 SPDK_ERRLOG("Invalid data buffer for Get Features - Host Identifier\n"); 1807 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1808 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1809 } 1810 1811 spdk_iov_xfer_init(&ix, req->iov, req->iovcnt); 1812 spdk_iov_xfer_from_buf(&ix, &ctrlr->hostid, sizeof(ctrlr->hostid)); 1813 1814 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1815 } 1816 1817 static int 1818 nvmf_ctrlr_get_features_reservation_notification_mask(struct spdk_nvmf_request *req) 1819 { 1820 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1821 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1822 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1823 struct spdk_nvmf_ns *ns; 1824 1825 SPDK_DEBUGLOG(nvmf, "get Features - Reservation Notification Mask\n"); 1826 1827 if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) { 1828 SPDK_ERRLOG("get Features - Invalid Namespace ID\n"); 1829 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1830 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1831 } 1832 1833 ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid); 1834 if (ns == NULL) { 1835 SPDK_ERRLOG("get Features - Invalid Namespace ID\n"); 1836 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1837 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1838 } 1839 rsp->cdw0 = ns->mask; 1840 1841 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1842 } 1843 1844 static int 1845 nvmf_ctrlr_set_features_reservation_notification_mask(struct spdk_nvmf_request *req) 1846 { 1847 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1848 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 1849 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1850 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1851 struct spdk_nvmf_ns *ns; 1852 1853 SPDK_DEBUGLOG(nvmf, "Set Features - Reservation Notification Mask\n"); 1854 1855 if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) { 1856 for (ns = 
spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL; 1857 ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) { 1858 ns->mask = cmd->cdw11; 1859 } 1860 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1861 } 1862 1863 ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid); 1864 if (ns == NULL) { 1865 SPDK_ERRLOG("Set Features - Invalid Namespace ID\n"); 1866 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1867 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1868 } 1869 ns->mask = cmd->cdw11; 1870 1871 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1872 } 1873 1874 static int 1875 nvmf_ctrlr_get_features_reservation_persistence(struct spdk_nvmf_request *req) 1876 { 1877 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1878 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1879 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 1880 struct spdk_nvmf_ns *ns; 1881 1882 SPDK_DEBUGLOG(nvmf, "Get Features - Reservation Persistence\n"); 1883 1884 ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid); 1885 /* NSID with SPDK_NVME_GLOBAL_NS_TAG (=0xffffffff) also included */ 1886 if (ns == NULL) { 1887 SPDK_ERRLOG("Get Features - Invalid Namespace ID\n"); 1888 response->status.sct = SPDK_NVME_SCT_GENERIC; 1889 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1890 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1891 } 1892 1893 response->cdw0 = ns->ptpl_activated; 1894 1895 response->status.sct = SPDK_NVME_SCT_GENERIC; 1896 response->status.sc = SPDK_NVME_SC_SUCCESS; 1897 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1898 } 1899 1900 static int 1901 nvmf_ctrlr_set_features_reservation_persistence(struct spdk_nvmf_request *req) 1902 { 1903 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1904 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1905 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 1906 struct spdk_nvmf_ns *ns; 1907 bool ptpl; 1908 1909 SPDK_DEBUGLOG(nvmf, "Set Features - Reservation Persistence\n"); 1910 1911 ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid); 1912 ptpl = cmd->cdw11_bits.feat_rsv_persistence.bits.ptpl; 1913 1914 if (cmd->nsid != SPDK_NVME_GLOBAL_NS_TAG && ns && nvmf_ns_is_ptpl_capable(ns)) { 1915 ns->ptpl_activated = ptpl; 1916 } else if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) { 1917 for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns; 1918 ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) { 1919 if (nvmf_ns_is_ptpl_capable(ns)) { 1920 ns->ptpl_activated = ptpl; 1921 } 1922 } 1923 } else { 1924 SPDK_ERRLOG("Set Features - Invalid Namespace ID or Reservation Configuration\n"); 1925 response->status.sct = SPDK_NVME_SCT_GENERIC; 1926 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1927 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1928 } 1929 1930 /* TODO: Feature not changeable for now */ 1931 response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 1932 response->status.sc = SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE; 1933 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1934 } 1935 1936 static int 1937 nvmf_ctrlr_get_features_host_behavior_support(struct spdk_nvmf_request *req) 1938 { 1939 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1940 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 1941 struct spdk_nvme_host_behavior host_behavior = {}; 1942 struct spdk_iov_xfer ix; 1943 1944 SPDK_DEBUGLOG(nvmf, "Get Features - Host Behavior Support\n"); 1945 1946 if (req->iovcnt < 1 || req->length < sizeof(struct spdk_nvme_host_behavior)) { 1947 SPDK_ERRLOG("invalid data buffer for Host Behavior Support\n"); 1948 response->status.sct = SPDK_NVME_SCT_GENERIC; 1949 response->status.sc = 
SPDK_NVME_SC_INVALID_FIELD; 1950 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1951 } 1952 1953 host_behavior.acre = ctrlr->acre_enabled; 1954 host_behavior.lbafee = ctrlr->lbafee_enabled; 1955 1956 spdk_iov_xfer_init(&ix, req->iov, req->iovcnt); 1957 spdk_iov_xfer_from_buf(&ix, &host_behavior, sizeof(host_behavior)); 1958 1959 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1960 } 1961 1962 static int 1963 nvmf_ctrlr_set_features_host_behavior_support(struct spdk_nvmf_request *req) 1964 { 1965 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 1966 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 1967 struct spdk_nvme_host_behavior *host_behavior; 1968 1969 SPDK_DEBUGLOG(nvmf, "Set Features - Host Behavior Support\n"); 1970 if (req->iovcnt != 1) { 1971 SPDK_ERRLOG("Host Behavior Support invalid iovcnt: %d\n", req->iovcnt); 1972 response->status.sct = SPDK_NVME_SCT_GENERIC; 1973 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1974 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1975 } 1976 if (req->iov[0].iov_len != sizeof(struct spdk_nvme_host_behavior)) { 1977 SPDK_ERRLOG("Host Behavior Support invalid iov_len: %zd\n", req->iov[0].iov_len); 1978 response->status.sct = SPDK_NVME_SCT_GENERIC; 1979 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1980 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1981 } 1982 1983 host_behavior = (struct spdk_nvme_host_behavior *)req->iov[0].iov_base; 1984 if (host_behavior->acre == 0) { 1985 ctrlr->acre_enabled = false; 1986 } else if (host_behavior->acre == 1) { 1987 ctrlr->acre_enabled = true; 1988 } else { 1989 SPDK_ERRLOG("Host Behavior Support invalid acre: 0x%02x\n", host_behavior->acre); 1990 response->status.sct = SPDK_NVME_SCT_GENERIC; 1991 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 1992 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1993 } 1994 if (host_behavior->lbafee == 0) { 1995 ctrlr->lbafee_enabled = false; 1996 } else if (host_behavior->lbafee == 1) { 1997 ctrlr->lbafee_enabled = true; 1998 } else { 1999 SPDK_ERRLOG("Host Behavior Support invalid lbafee: 0x%02x\n", host_behavior->lbafee); 2000 response->status.sct = SPDK_NVME_SCT_GENERIC; 2001 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2002 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2003 } 2004 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2005 } 2006 2007 static int 2008 nvmf_ctrlr_set_features_keep_alive_timer(struct spdk_nvmf_request *req) 2009 { 2010 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2011 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2012 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 2013 2014 SPDK_DEBUGLOG(nvmf, "Set Features - Keep Alive Timer (%u ms)\n", cmd->cdw11); 2015 2016 /* 2017 * If the host attempts to disable keep alive by setting KATO to 0h, 2018 * a status value of Keep Alive Invalid shall be returned. 2019 */ 2020 if (cmd->cdw11_bits.feat_keep_alive_timer.bits.kato == 0) { 2021 rsp->status.sc = SPDK_NVME_SC_KEEP_ALIVE_INVALID; 2022 } else if (cmd->cdw11_bits.feat_keep_alive_timer.bits.kato < MIN_KEEP_ALIVE_TIMEOUT_IN_MS) { 2023 ctrlr->feat.keep_alive_timer.bits.kato = MIN_KEEP_ALIVE_TIMEOUT_IN_MS; 2024 } else { 2025 /* round up to the nearest multiple of the KAS granularity */ 2026 ctrlr->feat.keep_alive_timer.bits.kato = spdk_divide_round_up( 2027 cmd->cdw11_bits.feat_keep_alive_timer.bits.kato, 2028 KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS) * 2029 KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS; 2030 } 2031 2032 /* 2033 * If the keep alive timeout value was changed successfully, 2034 * update the keep alive poller.
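 * Illustrative example (values assumed, derived only from the macros above):
 * the KAS granularity is KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS = 100 * 100 =
 * 10000 ms, so a requested KATO of 25000 ms is stored as 30000 ms, and any
 * non-zero request below MIN_KEEP_ALIVE_TIMEOUT_IN_MS is clamped to 10000 ms
 * before the poller below is re-registered with the new period.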
2035 */ 2036 if (cmd->cdw11_bits.feat_keep_alive_timer.bits.kato != 0) { 2037 if (ctrlr->keep_alive_poller != NULL) { 2038 spdk_poller_unregister(&ctrlr->keep_alive_poller); 2039 } 2040 ctrlr->keep_alive_poller = SPDK_POLLER_REGISTER(nvmf_ctrlr_keep_alive_poll, ctrlr, 2041 ctrlr->feat.keep_alive_timer.bits.kato * 1000); 2042 } 2043 2044 SPDK_DEBUGLOG(nvmf, "Set Features - Keep Alive Timer set to %u ms\n", 2045 ctrlr->feat.keep_alive_timer.bits.kato); 2046 2047 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2048 } 2049 2050 static int 2051 nvmf_ctrlr_set_features_number_of_queues(struct spdk_nvmf_request *req) 2052 { 2053 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2054 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2055 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 2056 uint32_t count; 2057 2058 SPDK_DEBUGLOG(nvmf, "Set Features - Number of Queues, cdw11 0x%x\n", 2059 req->cmd->nvme_cmd.cdw11); 2060 2061 if (cmd->cdw11_bits.feat_num_of_queues.bits.ncqr == UINT16_MAX || 2062 cmd->cdw11_bits.feat_num_of_queues.bits.nsqr == UINT16_MAX) { 2063 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2064 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2065 } 2066 2067 count = spdk_bit_array_count_set(ctrlr->qpair_mask); 2068 /* verify that the controller is ready to process commands */ 2069 if (count > 1) { 2070 SPDK_DEBUGLOG(nvmf, "Queue pairs already active!\n"); 2071 rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR; 2072 } else { 2073 /* 2074 * Ignore the value requested by the host - 2075 * always return the pre-configured value based on max_qpairs_allowed. 2076 */ 2077 rsp->cdw0 = ctrlr->feat.number_of_queues.raw; 2078 } 2079 2080 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2081 } 2082 2083 SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_ctrlr) == 4928, 2084 "Please check migration fields that need to be added or not"); 2085 2086 static void 2087 nvmf_ctrlr_migr_data_copy(struct spdk_nvmf_ctrlr_migr_data *data, 2088 const struct spdk_nvmf_ctrlr_migr_data *data_src, size_t data_size) 2089 { 2090 assert(data); 2091 assert(data_src); 2092 assert(data_size); 2093 2094 memcpy(&data->regs, &data_src->regs, spdk_min(data->regs_size, data_src->regs_size)); 2095 memcpy(&data->feat, &data_src->feat, spdk_min(data->feat_size, data_src->feat_size)); 2096 2097 #define SET_FIELD(field) \ 2098 if (offsetof(struct spdk_nvmf_ctrlr_migr_data, field) + sizeof(data->field) <= data_size) { \ 2099 data->field = data_src->field; \ 2100 } \ 2101 2102 SET_FIELD(cntlid); 2103 SET_FIELD(acre); 2104 SET_FIELD(num_aer_cids); 2105 SET_FIELD(num_async_events); 2106 SET_FIELD(notice_aen_mask); 2107 #undef SET_FIELD 2108 2109 #define SET_ARRAY(arr) \ 2110 if (offsetof(struct spdk_nvmf_ctrlr_migr_data, arr) + sizeof(data->arr) <= data_size) { \ 2111 memcpy(&data->arr, &data_src->arr, sizeof(data->arr)); \ 2112 } \ 2113 2114 SET_ARRAY(async_events); 2115 SET_ARRAY(aer_cids); 2116 #undef SET_ARRAY 2117 } 2118 2119 int 2120 spdk_nvmf_ctrlr_save_migr_data(struct spdk_nvmf_ctrlr *ctrlr, 2121 struct spdk_nvmf_ctrlr_migr_data *data) 2122 { 2123 struct spdk_nvmf_async_event_completion *event, *event_tmp; 2124 uint32_t i; 2125 struct spdk_nvmf_ctrlr_migr_data data_local = { 2126 .data_size = offsetof(struct spdk_nvmf_ctrlr_migr_data, unused), 2127 .regs_size = sizeof(struct spdk_nvmf_registers), 2128 .feat_size = sizeof(struct spdk_nvmf_ctrlr_feat) 2129 }; 2130 2131 assert(data->data_size <= sizeof(data_local)); 2132 assert(spdk_get_thread() == ctrlr->thread); 2133 2134 memcpy(&data_local.regs, &ctrlr->vcprop, sizeof(struct 
spdk_nvmf_registers)); 2135 memcpy(&data_local.feat, &ctrlr->feat, sizeof(struct spdk_nvmf_ctrlr_feat)); 2136 2137 data_local.cntlid = ctrlr->cntlid; 2138 data_local.acre = ctrlr->acre_enabled; 2139 data_local.num_aer_cids = ctrlr->nr_aer_reqs; 2140 2141 STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) { 2142 if (data_local.num_async_events + 1 > SPDK_NVMF_MIGR_MAX_PENDING_AERS) { 2143 SPDK_ERRLOG("ctrlr %p has too many pending AERs\n", ctrlr); 2144 break; 2145 } 2146 2147 data_local.async_events[data_local.num_async_events++].raw = event->event.raw; 2148 } 2149 2150 for (i = 0; i < ctrlr->nr_aer_reqs; i++) { 2151 struct spdk_nvmf_request *req = ctrlr->aer_req[i]; 2152 data_local.aer_cids[i] = req->cmd->nvme_cmd.cid; 2153 } 2154 data_local.notice_aen_mask = ctrlr->notice_aen_mask; 2155 2156 nvmf_ctrlr_migr_data_copy(data, &data_local, spdk_min(data->data_size, data_local.data_size)); 2157 return 0; 2158 } 2159 2160 int 2161 spdk_nvmf_ctrlr_restore_migr_data(struct spdk_nvmf_ctrlr *ctrlr, 2162 const struct spdk_nvmf_ctrlr_migr_data *data) 2163 { 2164 uint32_t i; 2165 struct spdk_nvmf_ctrlr_migr_data data_local = { 2166 .data_size = offsetof(struct spdk_nvmf_ctrlr_migr_data, unused), 2167 .regs_size = sizeof(struct spdk_nvmf_registers), 2168 .feat_size = sizeof(struct spdk_nvmf_ctrlr_feat) 2169 }; 2170 2171 assert(data->data_size <= sizeof(data_local)); 2172 assert(spdk_get_thread() == ctrlr->thread); 2173 2174 /* local version of data should have defaults set before copy */ 2175 nvmf_ctrlr_migr_data_copy(&data_local, data, spdk_min(data->data_size, data_local.data_size)); 2176 memcpy(&ctrlr->vcprop, &data_local.regs, sizeof(struct spdk_nvmf_registers)); 2177 memcpy(&ctrlr->feat, &data_local.feat, sizeof(struct spdk_nvmf_ctrlr_feat)); 2178 2179 ctrlr->cntlid = data_local.cntlid; 2180 ctrlr->acre_enabled = data_local.acre; 2181 2182 for (i = 0; i < data_local.num_async_events; i++) { 2183 struct spdk_nvmf_async_event_completion *event; 2184 2185 event = calloc(1, sizeof(*event)); 2186 if (!event) { 2187 return -ENOMEM; 2188 } 2189 2190 event->event.raw = data_local.async_events[i].raw; 2191 STAILQ_INSERT_TAIL(&ctrlr->async_events, event, link); 2192 } 2193 ctrlr->notice_aen_mask = data_local.notice_aen_mask; 2194 2195 return 0; 2196 } 2197 2198 static int 2199 nvmf_ctrlr_set_features_async_event_configuration(struct spdk_nvmf_request *req) 2200 { 2201 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2202 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2203 2204 SPDK_DEBUGLOG(nvmf, "Set Features - Async Event Configuration, cdw11 0x%08x\n", 2205 cmd->cdw11); 2206 ctrlr->feat.async_event_configuration.raw = cmd->cdw11; 2207 ctrlr->feat.async_event_configuration.bits.reserved1 = 0; 2208 ctrlr->feat.async_event_configuration.bits.reserved2 = 0; 2209 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2210 } 2211 2212 static int 2213 nvmf_ctrlr_async_event_request(struct spdk_nvmf_request *req) 2214 { 2215 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2216 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 2217 struct spdk_nvmf_async_event_completion *pending_event; 2218 2219 SPDK_DEBUGLOG(nvmf, "Async Event Request\n"); 2220 2221 /* Four asynchronous events are supported for now */ 2222 if (ctrlr->nr_aer_reqs >= SPDK_NVMF_MAX_ASYNC_EVENTS) { 2223 SPDK_DEBUGLOG(nvmf, "AERL exceeded\n"); 2224 rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 2225 rsp->status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED; 2226 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2227 } 2228 2229 
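/* If an event completion was queued while no AER was outstanding, consume it
 * and complete this AER immediately; otherwise the request is parked in
 * aer_req[] below until a future event fires.
 */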
if (!STAILQ_EMPTY(&ctrlr->async_events)) { 2230 pending_event = STAILQ_FIRST(&ctrlr->async_events); 2231 rsp->cdw0 = pending_event->event.raw; 2232 STAILQ_REMOVE(&ctrlr->async_events, pending_event, spdk_nvmf_async_event_completion, link); 2233 free(pending_event); 2234 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2235 } 2236 2237 ctrlr->aer_req[ctrlr->nr_aer_reqs++] = req; 2238 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 2239 } 2240 2241 static void 2242 nvmf_get_firmware_slot_log_page(struct iovec *iovs, int iovcnt, uint64_t offset, uint32_t length) 2243 { 2244 struct spdk_nvme_firmware_page fw_page; 2245 size_t copy_len; 2246 struct spdk_iov_xfer ix; 2247 2248 spdk_iov_xfer_init(&ix, iovs, iovcnt); 2249 2250 memset(&fw_page, 0, sizeof(fw_page)); 2251 fw_page.afi.active_slot = 1; 2252 fw_page.afi.next_reset_slot = 0; 2253 spdk_strcpy_pad(fw_page.revision[0], FW_VERSION, sizeof(fw_page.revision[0]), ' '); 2254 2255 if (offset < sizeof(fw_page)) { 2256 copy_len = spdk_min(sizeof(fw_page) - offset, length); 2257 if (copy_len > 0) { 2258 spdk_iov_xfer_from_buf(&ix, (const char *)&fw_page + offset, copy_len); 2259 } 2260 } 2261 } 2262 2263 /* 2264 * Asynchronous Event Mask Bit 2265 */ 2266 enum spdk_nvme_async_event_mask_bit { 2267 /* Mask Namespace Change Notification */ 2268 SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGE_MASK_BIT = 0, 2269 /* Mask Asymmetric Namespace Access Change Notification */ 2270 SPDK_NVME_ASYNC_EVENT_ANA_CHANGE_MASK_BIT = 1, 2271 /* Mask Discovery Log Change Notification */ 2272 SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE_MASK_BIT = 2, 2273 /* Mask Reservation Log Page Available Notification */ 2274 SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL_MASK_BIT = 3, 2275 /* Mask Error Event */ 2276 SPDK_NVME_ASYNC_EVENT_ERROR_MASK_BIT = 4, 2277 /* 5 - 63 Reserved */ 2278 }; 2279 2280 static inline void 2281 nvmf_ctrlr_unmask_aen(struct spdk_nvmf_ctrlr *ctrlr, 2282 enum spdk_nvme_async_event_mask_bit mask) 2283 { 2284 ctrlr->notice_aen_mask &= ~(1 << mask); 2285 } 2286 2287 static inline bool 2288 nvmf_ctrlr_mask_aen(struct spdk_nvmf_ctrlr *ctrlr, 2289 enum spdk_nvme_async_event_mask_bit mask) 2290 { 2291 if (ctrlr->notice_aen_mask & (1 << mask)) { 2292 return false; 2293 } else { 2294 ctrlr->notice_aen_mask |= (1 << mask); 2295 return true; 2296 } 2297 } 2298 2299 /* we have to use the typedef in the function declaration to appease astyle. */ 2300 typedef enum spdk_nvme_ana_state spdk_nvme_ana_state_t; 2301 2302 static inline spdk_nvme_ana_state_t 2303 nvmf_ctrlr_get_ana_state(struct spdk_nvmf_ctrlr *ctrlr, uint32_t anagrpid) 2304 { 2305 if (!ctrlr->subsys->flags.ana_reporting) { 2306 return SPDK_NVME_ANA_OPTIMIZED_STATE; 2307 } 2308 2309 if (spdk_unlikely(ctrlr->listener == NULL)) { 2310 return SPDK_NVME_ANA_INACCESSIBLE_STATE; 2311 } 2312 2313 assert(anagrpid - 1 < ctrlr->subsys->max_nsid); 2314 return ctrlr->listener->ana_state[anagrpid - 1]; 2315 } 2316 2317 static spdk_nvme_ana_state_t 2318 nvmf_ctrlr_get_ana_state_from_nsid(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid) 2319 { 2320 struct spdk_nvmf_ns *ns; 2321 2322 /* We do not have NVM subsystem specific ANA state. Hence if the NSID is 2323 * SPDK_NVME_GLOBAL_NS_TAG, invalid, or refers to an inactive namespace, return 2324 * the optimized state.
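 * For example, a command targeting an unallocated or invalid NSID is still
 * evaluated as if the path were optimized, since per-group ANA state only
 * exists for active namespaces.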
2325 */ 2326 ns = nvmf_ctrlr_get_ns(ctrlr, nsid); 2327 if (ns == NULL) { 2328 return SPDK_NVME_ANA_OPTIMIZED_STATE; 2329 } 2330 2331 return nvmf_ctrlr_get_ana_state(ctrlr, ns->anagrpid); 2332 } 2333 2334 static void 2335 nvmf_get_error_log_page(struct spdk_nvmf_ctrlr *ctrlr, struct iovec *iovs, int iovcnt, 2336 uint64_t offset, uint32_t length, uint32_t rae) 2337 { 2338 if (!rae) { 2339 nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ERROR_MASK_BIT); 2340 } 2341 2342 /* TODO: actually fill out log page data */ 2343 } 2344 2345 static void 2346 nvmf_get_ana_log_page(struct spdk_nvmf_ctrlr *ctrlr, struct iovec *iovs, int iovcnt, 2347 uint64_t offset, uint32_t length, uint32_t rae) 2348 { 2349 struct spdk_nvme_ana_page ana_hdr; 2350 struct spdk_nvme_ana_group_descriptor ana_desc; 2351 size_t copy_len, copied_len; 2352 uint32_t num_anagrp = 0, anagrpid; 2353 struct spdk_nvmf_ns *ns; 2354 struct spdk_iov_xfer ix; 2355 2356 spdk_iov_xfer_init(&ix, iovs, iovcnt); 2357 2358 if (length == 0) { 2359 goto done; 2360 } 2361 2362 if (offset >= sizeof(ana_hdr)) { 2363 offset -= sizeof(ana_hdr); 2364 } else { 2365 for (anagrpid = 1; anagrpid <= ctrlr->subsys->max_nsid; anagrpid++) { 2366 if (ctrlr->subsys->ana_group[anagrpid - 1] > 0) { 2367 num_anagrp++; 2368 } 2369 } 2370 2371 memset(&ana_hdr, 0, sizeof(ana_hdr)); 2372 2373 ana_hdr.num_ana_group_desc = num_anagrp; 2374 /* TODO: Support Change Count. */ 2375 ana_hdr.change_count = 0; 2376 2377 copy_len = spdk_min(sizeof(ana_hdr) - offset, length); 2378 copied_len = spdk_iov_xfer_from_buf(&ix, (const char *)&ana_hdr + offset, copy_len); 2379 assert(copied_len == copy_len); 2380 length -= copied_len; 2381 offset = 0; 2382 } 2383 2384 if (length == 0) { 2385 goto done; 2386 } 2387 2388 for (anagrpid = 1; anagrpid <= ctrlr->subsys->max_nsid; anagrpid++) { 2389 if (ctrlr->subsys->ana_group[anagrpid - 1] == 0) { 2390 continue; 2391 } 2392 2393 if (offset >= sizeof(ana_desc)) { 2394 offset -= sizeof(ana_desc); 2395 } else { 2396 memset(&ana_desc, 0, sizeof(ana_desc)); 2397 2398 ana_desc.ana_group_id = anagrpid; 2399 ana_desc.num_of_nsid = ctrlr->subsys->ana_group[anagrpid - 1]; 2400 ana_desc.ana_state = nvmf_ctrlr_get_ana_state(ctrlr, anagrpid); 2401 2402 copy_len = spdk_min(sizeof(ana_desc) - offset, length); 2403 copied_len = spdk_iov_xfer_from_buf(&ix, (const char *)&ana_desc + offset, 2404 copy_len); 2405 assert(copied_len == copy_len); 2406 length -= copied_len; 2407 offset = 0; 2408 2409 if (length == 0) { 2410 goto done; 2411 } 2412 } 2413 2414 /* TODO: Revisit here about O(n^2) cost if we have subsystem with 2415 * many namespaces in the future. 
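 * One possible direction (a sketch only, not implemented here) is to keep a
 * per-ANA-group list of namespaces so that each group is walked in time
 * proportional to its own size.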
2416 */ 2417 for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns != NULL; 2418 ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) { 2419 if (ns->anagrpid != anagrpid) { 2420 continue; 2421 } 2422 2423 if (offset >= sizeof(uint32_t)) { 2424 offset -= sizeof(uint32_t); 2425 continue; 2426 } 2427 2428 copy_len = spdk_min(sizeof(uint32_t) - offset, length); 2429 copied_len = spdk_iov_xfer_from_buf(&ix, (const char *)&ns->nsid + offset, 2430 copy_len); 2431 assert(copied_len == copy_len); 2432 length -= copied_len; 2433 offset = 0; 2434 2435 if (length == 0) { 2436 goto done; 2437 } 2438 } 2439 } 2440 2441 done: 2442 if (!rae) { 2443 nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ANA_CHANGE_MASK_BIT); 2444 } 2445 } 2446 2447 void 2448 nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid) 2449 { 2450 uint16_t max_changes = SPDK_COUNTOF(ctrlr->changed_ns_list.ns_list); 2451 uint16_t i; 2452 bool found = false; 2453 2454 for (i = 0; i < ctrlr->changed_ns_list_count; i++) { 2455 if (ctrlr->changed_ns_list.ns_list[i] == nsid) { 2456 /* nsid is already in the list */ 2457 found = true; 2458 break; 2459 } 2460 } 2461 2462 if (!found) { 2463 if (ctrlr->changed_ns_list_count == max_changes) { 2464 /* Out of space - set first entry to FFFFFFFFh and zero-fill the rest. */ 2465 ctrlr->changed_ns_list.ns_list[0] = 0xFFFFFFFFu; 2466 for (i = 1; i < max_changes; i++) { 2467 ctrlr->changed_ns_list.ns_list[i] = 0; 2468 } 2469 } else { 2470 ctrlr->changed_ns_list.ns_list[ctrlr->changed_ns_list_count++] = nsid; 2471 } 2472 } 2473 } 2474 2475 static void 2476 nvmf_get_changed_ns_list_log_page(struct spdk_nvmf_ctrlr *ctrlr, 2477 struct iovec *iovs, int iovcnt, uint64_t offset, uint32_t length, uint32_t rae) 2478 { 2479 size_t copy_length; 2480 struct spdk_iov_xfer ix; 2481 2482 spdk_iov_xfer_init(&ix, iovs, iovcnt); 2483 2484 if (offset < sizeof(ctrlr->changed_ns_list)) { 2485 copy_length = spdk_min(length, sizeof(ctrlr->changed_ns_list) - offset); 2486 if (copy_length) { 2487 spdk_iov_xfer_from_buf(&ix, (char *)&ctrlr->changed_ns_list + offset, copy_length); 2488 } 2489 } 2490 2491 /* Clear log page each time it is read */ 2492 ctrlr->changed_ns_list_count = 0; 2493 memset(&ctrlr->changed_ns_list, 0, sizeof(ctrlr->changed_ns_list)); 2494 2495 if (!rae) { 2496 nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGE_MASK_BIT); 2497 } 2498 } 2499 2500 /* The structure can be modified if we provide support for other commands in future */ 2501 static const struct spdk_nvme_cmds_and_effect_log_page g_cmds_and_effect_log_page = { 2502 .admin_cmds_supported = { 2503 /* CSUPP, LBCC, NCC, NIC, CCC, CSE */ 2504 /* Get Log Page */ 2505 [SPDK_NVME_OPC_GET_LOG_PAGE] = {1, 0, 0, 0, 0, 0, 0, 0}, 2506 /* Identify */ 2507 [SPDK_NVME_OPC_IDENTIFY] = {1, 0, 0, 0, 0, 0, 0, 0}, 2508 /* Abort */ 2509 [SPDK_NVME_OPC_ABORT] = {1, 0, 0, 0, 0, 0, 0, 0}, 2510 /* Set Features */ 2511 [SPDK_NVME_OPC_SET_FEATURES] = {1, 0, 0, 0, 0, 0, 0, 0}, 2512 /* Get Features */ 2513 [SPDK_NVME_OPC_GET_FEATURES] = {1, 0, 0, 0, 0, 0, 0, 0}, 2514 /* Async Event Request */ 2515 [SPDK_NVME_OPC_ASYNC_EVENT_REQUEST] = {1, 0, 0, 0, 0, 0, 0, 0}, 2516 /* Keep Alive */ 2517 [SPDK_NVME_OPC_KEEP_ALIVE] = {1, 0, 0, 0, 0, 0, 0, 0}, 2518 }, 2519 .io_cmds_supported = { 2520 /* FLUSH */ 2521 [SPDK_NVME_OPC_FLUSH] = {1, 1, 0, 0, 0, 0, 0, 0}, 2522 /* WRITE */ 2523 [SPDK_NVME_OPC_WRITE] = {1, 1, 0, 0, 0, 0, 0, 0}, 2524 /* READ */ 2525 [SPDK_NVME_OPC_READ] = {1, 0, 0, 0, 0, 0, 0, 0}, 2526 /* WRITE ZEROES */ 2527 
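/* Reading these entries against the CSUPP, LBCC, NCC, NIC, CCC, CSE legend
 * above: {1, 1, 0, 0, 0, 0, 0, 0} marks a command as supported (CSUPP=1) that
 * may change logical block contents (LBCC=1); all other effects bits are left
 * clear.
 */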
[SPDK_NVME_OPC_WRITE_ZEROES] = {1, 1, 0, 0, 0, 0, 0, 0}, 2528 /* DATASET MANAGEMENT */ 2529 [SPDK_NVME_OPC_DATASET_MANAGEMENT] = {1, 1, 0, 0, 0, 0, 0, 0}, 2530 /* COMPARE */ 2531 [SPDK_NVME_OPC_COMPARE] = {1, 0, 0, 0, 0, 0, 0, 0}, 2532 /* ZONE MANAGEMENT SEND */ 2533 [SPDK_NVME_OPC_ZONE_MGMT_SEND] = {1, 1, 0, 0, 0, 0, 0, 0}, 2534 /* ZONE MANAGEMENT RECEIVE */ 2535 [SPDK_NVME_OPC_ZONE_MGMT_RECV] = {1, 0, 0, 0, 0, 0, 0, 0}, 2536 /* ZONE APPEND */ 2537 [SPDK_NVME_OPC_ZONE_APPEND] = {1, 1, 0, 0, 0, 0, 0, 0}, 2538 /* COPY */ 2539 [SPDK_NVME_OPC_COPY] = {1, 1, 0, 0, 0, 0, 0, 0}, 2540 }, 2541 }; 2542 2543 static void 2544 nvmf_get_cmds_and_effects_log_page(struct spdk_nvmf_ctrlr *ctrlr, struct iovec *iovs, int iovcnt, 2545 uint64_t offset, uint32_t length) 2546 { 2547 uint32_t page_size = sizeof(struct spdk_nvme_cmds_and_effect_log_page); 2548 size_t copy_len = 0; 2549 struct spdk_nvme_cmds_and_effect_log_page cmds_and_effect_log_page = g_cmds_and_effect_log_page; 2550 struct spdk_nvme_cmds_and_effect_entry zero = {}; 2551 struct spdk_iov_xfer ix; 2552 2553 if (!ctrlr->cdata.oncs.write_zeroes || !nvmf_ctrlr_write_zeroes_supported(ctrlr)) { 2554 cmds_and_effect_log_page.io_cmds_supported[SPDK_NVME_OPC_WRITE_ZEROES] = zero; 2555 } 2556 if (!ctrlr->cdata.oncs.dsm || !nvmf_ctrlr_dsm_supported(ctrlr)) { 2557 cmds_and_effect_log_page.io_cmds_supported[SPDK_NVME_OPC_DATASET_MANAGEMENT] = zero; 2558 } 2559 if (!ctrlr->cdata.oncs.compare) { 2560 cmds_and_effect_log_page.io_cmds_supported[SPDK_NVME_OPC_COMPARE] = zero; 2561 } 2562 if (!nvmf_subsystem_has_zns_iocs(ctrlr->subsys)) { 2563 cmds_and_effect_log_page.io_cmds_supported[SPDK_NVME_OPC_ZONE_MGMT_SEND] = zero; 2564 cmds_and_effect_log_page.io_cmds_supported[SPDK_NVME_OPC_ZONE_MGMT_RECV] = zero; 2565 } 2566 if (!nvmf_subsystem_zone_append_supported(ctrlr->subsys)) { 2567 cmds_and_effect_log_page.io_cmds_supported[SPDK_NVME_OPC_ZONE_APPEND] = zero; 2568 } 2569 if (!ctrlr->cdata.oncs.copy) { 2570 cmds_and_effect_log_page.io_cmds_supported[SPDK_NVME_OPC_COPY] = zero; 2571 } 2572 2573 spdk_iov_xfer_init(&ix, iovs, iovcnt); 2574 if (offset < page_size) { 2575 copy_len = spdk_min(page_size - offset, length); 2576 spdk_iov_xfer_from_buf(&ix, (char *)(&cmds_and_effect_log_page) + offset, copy_len); 2577 } 2578 } 2579 2580 static void 2581 nvmf_get_reservation_notification_log_page(struct spdk_nvmf_ctrlr *ctrlr, 2582 struct iovec *iovs, int iovcnt, uint64_t offset, uint32_t length, uint32_t rae) 2583 { 2584 uint32_t unit_log_len, avail_log_len, next_pos, copy_len; 2585 struct spdk_nvmf_reservation_log *log, *log_tmp; 2586 struct spdk_iov_xfer ix; 2587 2588 spdk_iov_xfer_init(&ix, iovs, iovcnt); 2589 2590 unit_log_len = sizeof(struct spdk_nvme_reservation_notification_log); 2591 /* No available log, return zeroed log pages */ 2592 if (!ctrlr->num_avail_log_pages) { 2593 return; 2594 } 2595 2596 avail_log_len = ctrlr->num_avail_log_pages * unit_log_len; 2597 if (offset >= avail_log_len) { 2598 return; 2599 } 2600 2601 next_pos = 0; 2602 TAILQ_FOREACH_SAFE(log, &ctrlr->log_head, link, log_tmp) { 2603 TAILQ_REMOVE(&ctrlr->log_head, log, link); 2604 ctrlr->num_avail_log_pages--; 2605 2606 next_pos += unit_log_len; 2607 if (next_pos > offset) { 2608 copy_len = spdk_min(next_pos - offset, length); 2609 spdk_iov_xfer_from_buf(&ix, &log->log, copy_len); 2610 length -= copy_len; 2611 offset += copy_len; 2612 } 2613 free(log); 2614 2615 if (length == 0) { 2616 break; 2617 } 2618 } 2619 2620 if (!rae) { 2621 nvmf_ctrlr_unmask_aen(ctrlr, 
SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL_MASK_BIT); 2622 } 2623 return; 2624 } 2625 2626 static int 2627 nvmf_ctrlr_get_log_page(struct spdk_nvmf_request *req) 2628 { 2629 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2630 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 2631 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2632 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 2633 struct spdk_nvme_transport_id cmd_source_trid; 2634 uint64_t offset, len; 2635 uint32_t rae, numdl, numdu; 2636 uint8_t lid; 2637 2638 if (req->iovcnt < 1) { 2639 SPDK_DEBUGLOG(nvmf, "get log command with no buffer\n"); 2640 response->status.sct = SPDK_NVME_SCT_GENERIC; 2641 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2642 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2643 } 2644 2645 offset = (uint64_t)cmd->cdw12 | ((uint64_t)cmd->cdw13 << 32); 2646 if (offset & 3) { 2647 SPDK_ERRLOG("Invalid log page offset 0x%" PRIx64 "\n", offset); 2648 response->status.sct = SPDK_NVME_SCT_GENERIC; 2649 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2650 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2651 } 2652 2653 rae = cmd->cdw10_bits.get_log_page.rae; 2654 numdl = cmd->cdw10_bits.get_log_page.numdl; 2655 numdu = cmd->cdw11_bits.get_log_page.numdu; 2656 len = ((numdu << 16) + numdl + (uint64_t)1) * 4; 2657 if (len > req->length) { 2658 SPDK_ERRLOG("Get log page: len (%" PRIu64 ") > buf size (%u)\n", 2659 len, req->length); 2660 response->status.sct = SPDK_NVME_SCT_GENERIC; 2661 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2662 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2663 } 2664 2665 lid = cmd->cdw10_bits.get_log_page.lid; 2666 SPDK_DEBUGLOG(nvmf, "Get log page: LID=0x%02X offset=0x%" PRIx64 " len=0x%" PRIx64 " rae=%u\n", 2667 lid, offset, len, rae); 2668 2669 if (spdk_nvmf_subsystem_is_discovery(subsystem)) { 2670 switch (lid) { 2671 case SPDK_NVME_LOG_DISCOVERY: 2672 if (spdk_nvmf_qpair_get_listen_trid(req->qpair, &cmd_source_trid)) { 2673 SPDK_ERRLOG("Failed to get LOG_DISCOVERY source trid\n"); 2674 response->status.sct = SPDK_NVME_SCT_GENERIC; 2675 response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2676 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2677 } 2678 nvmf_get_discovery_log_page(subsystem->tgt, ctrlr->hostnqn, req->iov, req->iovcnt, 2679 offset, len, &cmd_source_trid); 2680 if (!rae) { 2681 nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE_MASK_BIT); 2682 } 2683 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2684 default: 2685 goto invalid_log_page; 2686 } 2687 } else { 2688 if (offset > len) { 2689 SPDK_ERRLOG("Get log page: offset (%" PRIu64 ") > len (%" PRIu64 ")\n", 2690 offset, len); 2691 response->status.sct = SPDK_NVME_SCT_GENERIC; 2692 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2693 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2694 } 2695 2696 switch (lid) { 2697 case SPDK_NVME_LOG_ERROR: 2698 nvmf_get_error_log_page(ctrlr, req->iov, req->iovcnt, offset, len, rae); 2699 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2700 case SPDK_NVME_LOG_HEALTH_INFORMATION: 2701 /* TODO: actually fill out log page data */ 2702 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2703 case SPDK_NVME_LOG_FIRMWARE_SLOT: 2704 nvmf_get_firmware_slot_log_page(req->iov, req->iovcnt, offset, len); 2705 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2706 case SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS: 2707 if (subsystem->flags.ana_reporting) { 2708 nvmf_get_ana_log_page(ctrlr, req->iov, req->iovcnt, offset, len, rae); 2709 return 
SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2710 } else { 2711 goto invalid_log_page; 2712 } 2713 case SPDK_NVME_LOG_COMMAND_EFFECTS_LOG: 2714 nvmf_get_cmds_and_effects_log_page(ctrlr, req->iov, req->iovcnt, offset, len); 2715 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2716 case SPDK_NVME_LOG_CHANGED_NS_LIST: 2717 nvmf_get_changed_ns_list_log_page(ctrlr, req->iov, req->iovcnt, offset, len, rae); 2718 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2719 case SPDK_NVME_LOG_RESERVATION_NOTIFICATION: 2720 nvmf_get_reservation_notification_log_page(ctrlr, req->iov, req->iovcnt, offset, len, rae); 2721 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2722 default: 2723 goto invalid_log_page; 2724 } 2725 } 2726 2727 invalid_log_page: 2728 SPDK_INFOLOG(nvmf, "Unsupported Get Log Page 0x%02X\n", lid); 2729 response->status.sct = SPDK_NVME_SCT_GENERIC; 2730 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 2731 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2732 } 2733 2734 static struct spdk_nvmf_ns * 2735 _nvmf_ctrlr_get_ns_safe(struct spdk_nvmf_ctrlr *ctrlr, 2736 uint32_t nsid, 2737 struct spdk_nvme_cpl *rsp) 2738 { 2739 struct spdk_nvmf_ns *ns; 2740 if (nsid == 0 || nsid > ctrlr->subsys->max_nsid) { 2741 SPDK_ERRLOG("Identify Namespace for invalid NSID %u\n", nsid); 2742 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 2743 rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT; 2744 return NULL; 2745 } 2746 2747 ns = nvmf_ctrlr_get_ns(ctrlr, nsid); 2748 if (ns == NULL || ns->bdev == NULL) { 2749 /* 2750 * Inactive namespaces should return a zero filled data structure. 2751 * The data buffer is already zeroed by nvmf_ctrlr_process_admin_cmd(), 2752 * so we can just return early here. 2753 */ 2754 SPDK_DEBUGLOG(nvmf, "Identify Namespace for inactive NSID %u\n", nsid); 2755 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 2756 rsp->status.sc = SPDK_NVME_SC_SUCCESS; 2757 return NULL; 2758 } 2759 return ns; 2760 } 2761 2762 int 2763 spdk_nvmf_ctrlr_identify_ns(struct spdk_nvmf_ctrlr *ctrlr, 2764 struct spdk_nvme_cmd *cmd, 2765 struct spdk_nvme_cpl *rsp, 2766 struct spdk_nvme_ns_data *nsdata) 2767 { 2768 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 2769 struct spdk_nvmf_ns *ns; 2770 uint32_t max_num_blocks, format_index; 2771 enum spdk_nvme_ana_state ana_state; 2772 2773 ns = _nvmf_ctrlr_get_ns_safe(ctrlr, cmd->nsid, rsp); 2774 if (ns == NULL) { 2775 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2776 } 2777 2778 nvmf_bdev_ctrlr_identify_ns(ns, nsdata, ctrlr->dif_insert_or_strip); 2779 2780 assert(ctrlr->admin_qpair); 2781 2782 format_index = spdk_nvme_ns_get_format_index(nsdata); 2783 2784 /* Due to bug in the Linux kernel NVMe driver we have to set noiob no larger than mdts */ 2785 max_num_blocks = ctrlr->admin_qpair->transport->opts.max_io_size / 2786 (1U << nsdata->lbaf[format_index].lbads); 2787 if (nsdata->noiob > max_num_blocks) { 2788 nsdata->noiob = max_num_blocks; 2789 } 2790 2791 /* Set NOWS equal to Controller MDTS */ 2792 if (nsdata->nsfeat.optperf) { 2793 nsdata->nows = max_num_blocks - 1; 2794 } 2795 2796 if (subsystem->flags.ana_reporting) { 2797 assert(ns->anagrpid - 1 < subsystem->max_nsid); 2798 nsdata->anagrpid = ns->anagrpid; 2799 2800 ana_state = nvmf_ctrlr_get_ana_state(ctrlr, ns->anagrpid); 2801 if (ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE || 2802 ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE) { 2803 nsdata->nuse = 0; 2804 } 2805 } 2806 2807 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2808 } 2809 2810 static void 2811 nvmf_ctrlr_populate_oacs(struct spdk_nvmf_ctrlr 
*ctrlr, 2812 struct spdk_nvme_ctrlr_data *cdata) 2813 { 2814 cdata->oacs = ctrlr->cdata.oacs; 2815 2816 cdata->oacs.virtualization_management = 2817 g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_VIRTUALIZATION_MANAGEMENT].hdlr != NULL; 2818 cdata->oacs.nvme_mi = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NVME_MI_SEND].hdlr != NULL 2819 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NVME_MI_RECEIVE].hdlr != NULL; 2820 cdata->oacs.directives = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_DIRECTIVE_SEND].hdlr != NULL 2821 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_DIRECTIVE_RECEIVE].hdlr != NULL; 2822 cdata->oacs.device_self_test = 2823 g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_DEVICE_SELF_TEST].hdlr != NULL; 2824 cdata->oacs.ns_manage = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NS_MANAGEMENT].hdlr != NULL 2825 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NS_ATTACHMENT].hdlr != NULL; 2826 cdata->oacs.firmware = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD].hdlr != 2827 NULL 2828 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_FIRMWARE_COMMIT].hdlr != NULL; 2829 cdata->oacs.format = 2830 g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_FORMAT_NVM].hdlr != NULL; 2831 cdata->oacs.security = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_SECURITY_SEND].hdlr != NULL 2832 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_SECURITY_RECEIVE].hdlr != NULL; 2833 cdata->oacs.get_lba_status = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_GET_LBA_STATUS].hdlr != 2834 NULL; 2835 } 2836 2837 int 2838 spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_ctrlr_data *cdata) 2839 { 2840 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 2841 struct spdk_nvmf_transport *transport; 2842 2843 /* 2844 * Common fields for discovery and NVM subsystems 2845 */ 2846 assert(ctrlr->admin_qpair); 2847 transport = ctrlr->admin_qpair->transport; 2848 spdk_strcpy_pad(cdata->fr, FW_VERSION, sizeof(cdata->fr), ' '); 2849 assert((transport->opts.max_io_size % 4096) == 0); 2850 cdata->mdts = spdk_u32log2(transport->opts.max_io_size / 4096); 2851 cdata->cntlid = ctrlr->cntlid; 2852 cdata->ver = ctrlr->vcprop.vs; 2853 cdata->aerl = ctrlr->cdata.aerl; 2854 cdata->lpa.edlp = 1; 2855 cdata->elpe = 127; 2856 cdata->maxcmd = transport->opts.max_queue_depth; 2857 cdata->sgls = ctrlr->cdata.sgls; 2858 cdata->fuses = ctrlr->cdata.fuses; 2859 cdata->acwu = 0; /* ACWU is 0-based. 
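 * A 0-based value of 0 therefore advertises an atomic compare & write unit of
 * a single logical block.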
*/ 2860 if (subsystem->flags.ana_reporting) { 2861 cdata->mnan = subsystem->max_nsid; 2862 } 2863 spdk_strcpy_pad(cdata->subnqn, subsystem->subnqn, sizeof(cdata->subnqn), '\0'); 2864 2865 SPDK_DEBUGLOG(nvmf, "ctrlr data: maxcmd 0x%x\n", cdata->maxcmd); 2866 SPDK_DEBUGLOG(nvmf, "sgls data: 0x%x\n", from_le32(&cdata->sgls)); 2867 2868 2869 if (spdk_nvmf_subsystem_is_discovery(subsystem)) { 2870 /* 2871 * NVM Discovery subsystem fields 2872 */ 2873 cdata->oaes.discovery_log_change_notices = 1; 2874 cdata->cntrltype = SPDK_NVME_CTRLR_DISCOVERY; 2875 } else { 2876 cdata->vid = ctrlr->cdata.vid; 2877 cdata->ssvid = ctrlr->cdata.ssvid; 2878 cdata->ieee[0] = ctrlr->cdata.ieee[0]; 2879 cdata->ieee[1] = ctrlr->cdata.ieee[1]; 2880 cdata->ieee[2] = ctrlr->cdata.ieee[2]; 2881 2882 /* 2883 * NVM subsystem fields (reserved for discovery subsystems) 2884 */ 2885 spdk_strcpy_pad(cdata->mn, spdk_nvmf_subsystem_get_mn(subsystem), sizeof(cdata->mn), ' '); 2886 spdk_strcpy_pad(cdata->sn, spdk_nvmf_subsystem_get_sn(subsystem), sizeof(cdata->sn), ' '); 2887 cdata->kas = ctrlr->cdata.kas; 2888 2889 cdata->rab = 6; 2890 cdata->cmic.multi_port = 1; 2891 cdata->cmic.multi_ctrlr = 1; 2892 cdata->oaes.ns_attribute_notices = 1; 2893 cdata->ctratt.bits.host_id_exhid_supported = 1; 2894 cdata->ctratt.bits.fdps = ctrlr->subsys->fdp_supported; 2895 cdata->cntrltype = SPDK_NVME_CTRLR_IO; 2896 /* We do not have any actual limitation to the number of abort commands. 2897 * We follow the recommendation by the NVMe specification. 2898 */ 2899 cdata->acl = NVMF_ABORT_COMMAND_LIMIT; 2900 cdata->frmw.slot1_ro = 1; 2901 cdata->frmw.num_slots = 1; 2902 2903 cdata->lpa.celp = 1; /* Command Effects log page supported */ 2904 2905 cdata->sqes.min = 6; 2906 cdata->sqes.max = 6; 2907 cdata->cqes.min = 4; 2908 cdata->cqes.max = 4; 2909 cdata->nn = subsystem->max_nsid; 2910 cdata->vwc.present = 1; 2911 cdata->vwc.flush_broadcast = SPDK_NVME_FLUSH_BROADCAST_NOT_SUPPORTED; 2912 2913 cdata->nvmf_specific = ctrlr->cdata.nvmf_specific; 2914 2915 cdata->oncs.compare = ctrlr->cdata.oncs.compare; 2916 cdata->oncs.dsm = ctrlr->cdata.oncs.dsm && nvmf_ctrlr_dsm_supported(ctrlr); 2917 cdata->oncs.write_zeroes = ctrlr->cdata.oncs.write_zeroes && 2918 nvmf_ctrlr_write_zeroes_supported(ctrlr); 2919 cdata->oncs.reservations = ctrlr->cdata.oncs.reservations; 2920 cdata->oncs.copy = ctrlr->cdata.oncs.copy; 2921 cdata->ocfs.copy_format0 = cdata->oncs.copy; 2922 if (subsystem->flags.ana_reporting) { 2923 /* Asymmetric Namespace Access Reporting is supported. */ 2924 cdata->cmic.ana_reporting = 1; 2925 cdata->oaes.ana_change_notices = 1; 2926 2927 cdata->anatt = ANA_TRANSITION_TIME_IN_SEC; 2928 /* ANA Change state is not used, and ANA Persistent Loss state 2929 * is not supported for now. 
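 * Accordingly, ANACAP below advertises only the optimized, non-optimized and
 * inaccessible states.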
2930 */ 2931 cdata->anacap.ana_optimized_state = 1; 2932 cdata->anacap.ana_non_optimized_state = 1; 2933 cdata->anacap.ana_inaccessible_state = 1; 2934 /* ANAGRPID does not change while namespace is attached to controller */ 2935 cdata->anacap.no_change_anagrpid = 1; 2936 cdata->anagrpmax = subsystem->max_nsid; 2937 cdata->nanagrpid = subsystem->max_nsid; 2938 } 2939 2940 nvmf_ctrlr_populate_oacs(ctrlr, cdata); 2941 2942 assert(subsystem->tgt != NULL); 2943 cdata->crdt[0] = subsystem->tgt->crdt[0]; 2944 cdata->crdt[1] = subsystem->tgt->crdt[1]; 2945 cdata->crdt[2] = subsystem->tgt->crdt[2]; 2946 2947 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: ioccsz 0x%x\n", 2948 cdata->nvmf_specific.ioccsz); 2949 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: iorcsz 0x%x\n", 2950 cdata->nvmf_specific.iorcsz); 2951 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: icdoff 0x%x\n", 2952 cdata->nvmf_specific.icdoff); 2953 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: ctrattr 0x%x\n", 2954 *(uint8_t *)&cdata->nvmf_specific.ctrattr); 2955 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: msdbd 0x%x\n", 2956 cdata->nvmf_specific.msdbd); 2957 } 2958 2959 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2960 } 2961 2962 static int 2963 nvmf_ns_identify_iocs_zns(struct spdk_nvmf_ns *ns, 2964 struct spdk_nvme_cmd *cmd, 2965 struct spdk_nvme_cpl *rsp, 2966 struct spdk_nvme_zns_ns_data *nsdata_zns) 2967 { 2968 nsdata_zns->zoc.variable_zone_capacity = 0; 2969 nsdata_zns->zoc.zone_active_excursions = 0; 2970 nsdata_zns->ozcs.read_across_zone_boundaries = 1; 2971 /* Underflowing the zero based mar and mor bdev helper results in the correct 2972 value of FFFFFFFFh. */ 2973 nsdata_zns->mar = spdk_bdev_get_max_active_zones(ns->bdev) - 1; 2974 nsdata_zns->mor = spdk_bdev_get_max_open_zones(ns->bdev) - 1; 2975 nsdata_zns->rrl = 0; 2976 nsdata_zns->frl = 0; 2977 nsdata_zns->lbafe[0].zsze = spdk_bdev_get_zone_size(ns->bdev); 2978 2979 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 2980 rsp->status.sc = SPDK_NVME_SC_SUCCESS; 2981 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 2982 } 2983 2984 int 2985 spdk_nvmf_ns_identify_iocs_specific(struct spdk_nvmf_ctrlr *ctrlr, 2986 struct spdk_nvme_cmd *cmd, 2987 struct spdk_nvme_cpl *rsp, 2988 void *nsdata, 2989 size_t nsdata_size) 2990 { 2991 uint8_t csi = cmd->cdw11_bits.identify.csi; 2992 struct spdk_nvmf_ns *ns = _nvmf_ctrlr_get_ns_safe(ctrlr, cmd->nsid, rsp); 2993 2994 memset(nsdata, 0, nsdata_size); 2995 2996 if (ns == NULL) { 2997 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 2998 rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT; 2999 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3000 } 3001 3002 switch (csi) { 3003 case SPDK_NVME_CSI_ZNS: 3004 return nvmf_ns_identify_iocs_zns(ns, cmd, rsp, nsdata); 3005 default: 3006 break; 3007 } 3008 3009 SPDK_DEBUGLOG(nvmf, 3010 "Returning zero filled struct for the iocs specific ns " 3011 "identify command and CSI 0x%02x\n", 3012 csi); 3013 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3014 rsp->status.sc = SPDK_NVME_SC_SUCCESS; 3015 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3016 } 3017 3018 static int 3019 nvmf_ctrlr_identify_iocs_nvm(struct spdk_nvmf_ctrlr *ctrlr, 3020 struct spdk_nvme_cmd *cmd, 3021 struct spdk_nvme_cpl *rsp, 3022 struct spdk_nvme_nvm_ctrlr_data *cdata_nvm) 3023 { 3024 /* The unit of max_write_zeroes_size_kib is KiB. 3025 * The unit of wzsl is the minimum memory page size(2 ^ (12 + CAP.MPSMIN) bytes) 3026 * and is reported as a power of two (2^n). 
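 * Worked example (illustrative values): with max_write_zeroes_size_kib = 128
 * and CAP.MPSMIN = 0, a page is 4 KiB (2^2 KiB), so
 * wzsl = log2(128 >> 2) = log2(32) = 5, i.e. 2^5 pages of 4 KiB = 128 KiB.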
3027 */ 3028 cdata_nvm->wzsl = spdk_u64log2(ctrlr->subsys->max_write_zeroes_size_kib >> 3029 (2 + ctrlr->vcprop.cap.bits.mpsmin)); 3030 3031 /* The unit of max_discard_size_kib is KiB. 3032 * The dmrsl indicates the maximum number of logical blocks for 3033 * dataset management command. 3034 */ 3035 cdata_nvm->dmrsl = ctrlr->subsys->max_discard_size_kib << 1; 3036 cdata_nvm->dmrl = 1; 3037 3038 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3039 rsp->status.sc = SPDK_NVME_SC_SUCCESS; 3040 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3041 } 3042 3043 static int 3044 nvmf_ctrlr_identify_iocs_zns(struct spdk_nvmf_ctrlr *ctrlr, 3045 struct spdk_nvme_cmd *cmd, 3046 struct spdk_nvme_cpl *rsp, 3047 struct spdk_nvme_zns_ctrlr_data *cdata_zns) 3048 { 3049 /* The unit of max_zone_append_size_kib is KiB. 3050 The unit of zasl is the minimum memory page size 3051 (2 ^ (12 + CAP.MPSMIN) KiB) 3052 and is reported as a power of two (2^n). */ 3053 cdata_zns->zasl = spdk_u64log2(ctrlr->subsys->max_zone_append_size_kib >> 3054 (12 + ctrlr->vcprop.cap.bits.mpsmin)); 3055 3056 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3057 rsp->status.sc = SPDK_NVME_SC_SUCCESS; 3058 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3059 } 3060 3061 int 3062 spdk_nvmf_ctrlr_identify_iocs_specific(struct spdk_nvmf_ctrlr *ctrlr, 3063 struct spdk_nvme_cmd *cmd, 3064 struct spdk_nvme_cpl *rsp, 3065 void *cdata, 3066 size_t cdata_size) 3067 { 3068 uint8_t csi = cmd->cdw11_bits.identify.csi; 3069 3070 memset(cdata, 0, cdata_size); 3071 3072 switch (csi) { 3073 case SPDK_NVME_CSI_NVM: 3074 return nvmf_ctrlr_identify_iocs_nvm(ctrlr, cmd, rsp, cdata); 3075 case SPDK_NVME_CSI_ZNS: 3076 return nvmf_ctrlr_identify_iocs_zns(ctrlr, cmd, rsp, cdata); 3077 default: 3078 break; 3079 } 3080 3081 SPDK_DEBUGLOG(nvmf, 3082 "Returning zero filled struct for the iocs specific ctrlr " 3083 "identify command and CSI 0x%02x\n", 3084 csi); 3085 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3086 rsp->status.sc = SPDK_NVME_SC_SUCCESS; 3087 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3088 } 3089 3090 static int 3091 nvmf_ctrlr_identify_active_ns_list(struct spdk_nvmf_ctrlr *ctrlr, 3092 struct spdk_nvme_cmd *cmd, 3093 struct spdk_nvme_cpl *rsp, 3094 struct spdk_nvme_ns_list *ns_list) 3095 { 3096 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 3097 struct spdk_nvmf_ns *ns; 3098 uint32_t count = 0; 3099 3100 if (cmd->nsid >= 0xfffffffeUL) { 3101 SPDK_ERRLOG("Identify Active Namespace List with invalid NSID %u\n", cmd->nsid); 3102 rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT; 3103 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3104 } 3105 3106 memset(ns_list, 0, sizeof(*ns_list)); 3107 3108 for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL; 3109 ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) { 3110 if (ns->opts.nsid <= cmd->nsid || !nvmf_ctrlr_ns_is_visible(ctrlr, ns->opts.nsid)) { 3111 continue; 3112 } 3113 3114 ns_list->ns_list[count++] = ns->opts.nsid; 3115 if (count == SPDK_COUNTOF(ns_list->ns_list)) { 3116 break; 3117 } 3118 } 3119 3120 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3121 } 3122 3123 static void 3124 _add_ns_id_desc(void **buf_ptr, size_t *buf_remain, 3125 enum spdk_nvme_nidt type, 3126 const void *data, size_t data_size) 3127 { 3128 struct spdk_nvme_ns_id_desc *desc; 3129 size_t desc_size = sizeof(*desc) + data_size; 3130 3131 /* 3132 * These should never fail in practice, since all valid NS ID descriptors 3133 * should be defined so that they fit in the available 4096-byte buffer. 
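 * (Each descriptor is a 4-byte header plus NIDL bytes of payload; the EUI64,
 * NGUID, UUID and CSI descriptors emitted below add up to well under 100
 * bytes.)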
3134 */ 3135 assert(data_size > 0); 3136 assert(data_size <= UINT8_MAX); 3137 assert(desc_size < *buf_remain); 3138 if (data_size == 0 || data_size > UINT8_MAX || desc_size > *buf_remain) { 3139 return; 3140 } 3141 3142 desc = *buf_ptr; 3143 desc->nidt = type; 3144 desc->nidl = data_size; 3145 memcpy(desc->nid, data, data_size); 3146 3147 *buf_ptr += desc_size; 3148 *buf_remain -= desc_size; 3149 } 3150 3151 static int 3152 nvmf_ctrlr_identify_ns_id_descriptor_list( 3153 struct spdk_nvmf_ctrlr *ctrlr, 3154 struct spdk_nvme_cmd *cmd, 3155 struct spdk_nvme_cpl *rsp, 3156 void *id_desc_list, size_t id_desc_list_size) 3157 { 3158 struct spdk_nvmf_ns *ns; 3159 size_t buf_remain = id_desc_list_size; 3160 void *buf_ptr = id_desc_list; 3161 3162 ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid); 3163 if (ns == NULL || ns->bdev == NULL) { 3164 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3165 rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT; 3166 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3167 } 3168 3169 #define ADD_ID_DESC(type, data, size) \ 3170 do { \ 3171 if (!spdk_mem_all_zero(data, size)) { \ 3172 _add_ns_id_desc(&buf_ptr, &buf_remain, type, data, size); \ 3173 } \ 3174 } while (0) 3175 3176 ADD_ID_DESC(SPDK_NVME_NIDT_EUI64, ns->opts.eui64, sizeof(ns->opts.eui64)); 3177 ADD_ID_DESC(SPDK_NVME_NIDT_NGUID, ns->opts.nguid, sizeof(ns->opts.nguid)); 3178 ADD_ID_DESC(SPDK_NVME_NIDT_UUID, &ns->opts.uuid, sizeof(ns->opts.uuid)); 3179 ADD_ID_DESC(SPDK_NVME_NIDT_CSI, &ns->csi, sizeof(uint8_t)); 3180 3181 /* 3182 * The list is automatically 0-terminated, both in the temporary buffer 3183 * used by nvmf_ctrlr_identify(), and the eventual iov destination - 3184 * controller to host buffers in admin commands always get zeroed in 3185 * nvmf_ctrlr_process_admin_cmd(). 3186 */ 3187 3188 #undef ADD_ID_DESC 3189 3190 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3191 } 3192 3193 static int 3194 nvmf_ctrlr_identify_iocs(struct spdk_nvmf_ctrlr *ctrlr, 3195 struct spdk_nvme_cmd *cmd, 3196 struct spdk_nvme_cpl *rsp, 3197 void *cdata, size_t cdata_size) 3198 { 3199 struct spdk_nvme_iocs_vector *vector; 3200 struct spdk_nvmf_ns *ns; 3201 3202 if (cdata_size < sizeof(struct spdk_nvme_iocs_vector)) { 3203 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3204 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 3205 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3206 } 3207 3208 /* For now we only support this command sent to the current 3209 * controller. 
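 * A CNTID of 0xFFFF is taken to mean "the controller processing the command",
 * so it is accepted alongside this controller's own CNTLID.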
3210 */ 3211 if (cmd->cdw10_bits.identify.cntid != 0xFFFF && 3212 cmd->cdw10_bits.identify.cntid != ctrlr->cntlid) { 3213 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3214 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 3215 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3216 } 3217 memset(cdata, 0, cdata_size); 3218 3219 vector = cdata; 3220 vector->nvm = 1; 3221 for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns != NULL; 3222 ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) { 3223 if (ns->bdev == NULL) { 3224 continue; 3225 } 3226 if (spdk_bdev_is_zoned(ns->bdev)) { 3227 vector->zns = 1; 3228 } 3229 } 3230 3231 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3232 rsp->status.sc = SPDK_NVME_SC_SUCCESS; 3233 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3234 } 3235 3236 static int 3237 nvmf_ctrlr_identify(struct spdk_nvmf_request *req) 3238 { 3239 uint8_t cns; 3240 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 3241 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 3242 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 3243 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 3244 int ret = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3245 char tmpbuf[SPDK_NVME_IDENTIFY_BUFLEN] = ""; 3246 struct spdk_iov_xfer ix; 3247 3248 if (req->iovcnt < 1 || req->length < SPDK_NVME_IDENTIFY_BUFLEN) { 3249 SPDK_DEBUGLOG(nvmf, "identify command with invalid buffer\n"); 3250 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3251 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 3252 return ret; 3253 } 3254 3255 cns = cmd->cdw10_bits.identify.cns; 3256 3257 if (spdk_nvmf_subsystem_is_discovery(subsystem) && 3258 cns != SPDK_NVME_IDENTIFY_CTRLR) { 3259 /* Discovery controllers only support Identify Controller */ 3260 goto invalid_cns; 3261 } 3262 3263 /* 3264 * We must use a temporary buffer: it's entirely possible the out buffer 3265 * is split across more than one IOV. 
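 * For example, a 4096-byte Identify payload may arrive as two 2048-byte
 * iovecs; spdk_iov_xfer_from_buf() below scatters the temporary buffer across
 * however many iovecs the transport supplied.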
3266 */ 3267 spdk_iov_xfer_init(&ix, req->iov, req->iovcnt); 3268 3269 SPDK_DEBUGLOG(nvmf, "Received identify command with CNS 0x%02x\n", cns); 3270 3271 switch (cns) { 3272 case SPDK_NVME_IDENTIFY_NS: 3273 ret = spdk_nvmf_ctrlr_identify_ns(ctrlr, cmd, rsp, (void *)&tmpbuf); 3274 break; 3275 case SPDK_NVME_IDENTIFY_CTRLR: 3276 ret = spdk_nvmf_ctrlr_identify_ctrlr(ctrlr, (void *)&tmpbuf); 3277 break; 3278 case SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST: 3279 ret = nvmf_ctrlr_identify_active_ns_list(ctrlr, cmd, rsp, (void *)&tmpbuf); 3280 break; 3281 case SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST: 3282 ret = nvmf_ctrlr_identify_ns_id_descriptor_list(ctrlr, cmd, rsp, 3283 tmpbuf, req->length); 3284 break; 3285 case SPDK_NVME_IDENTIFY_NS_IOCS: 3286 ret = spdk_nvmf_ns_identify_iocs_specific(ctrlr, cmd, rsp, (void *)&tmpbuf, req->length); 3287 break; 3288 case SPDK_NVME_IDENTIFY_CTRLR_IOCS: 3289 ret = spdk_nvmf_ctrlr_identify_iocs_specific(ctrlr, cmd, rsp, (void *)&tmpbuf, req->length); 3290 break; 3291 case SPDK_NVME_IDENTIFY_IOCS: 3292 ret = nvmf_ctrlr_identify_iocs(ctrlr, cmd, rsp, (void *)&tmpbuf, req->length); 3293 break; 3294 default: 3295 goto invalid_cns; 3296 } 3297 3298 if (ret == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) { 3299 spdk_iov_xfer_from_buf(&ix, tmpbuf, sizeof(tmpbuf)); 3300 } 3301 3302 return ret; 3303 3304 invalid_cns: 3305 SPDK_DEBUGLOG(nvmf, "Identify command with unsupported CNS 0x%02x\n", cns); 3306 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3307 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 3308 return ret; 3309 } 3310 3311 static bool 3312 nvmf_qpair_abort_aer(struct spdk_nvmf_qpair *qpair, uint16_t cid) 3313 { 3314 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; 3315 struct spdk_nvmf_request *req; 3316 int i; 3317 3318 if (!nvmf_qpair_is_admin_queue(qpair)) { 3319 return false; 3320 } 3321 3322 assert(spdk_get_thread() == ctrlr->thread); 3323 3324 for (i = 0; i < ctrlr->nr_aer_reqs; i++) { 3325 if (ctrlr->aer_req[i]->cmd->nvme_cmd.cid == cid) { 3326 SPDK_DEBUGLOG(nvmf, "Aborting AER request\n"); 3327 req = ctrlr->aer_req[i]; 3328 ctrlr->aer_req[i] = NULL; 3329 ctrlr->nr_aer_reqs--; 3330 3331 /* Move the last req to the aborting position for making aer_reqs 3332 * in continuous 3333 */ 3334 if (i < ctrlr->nr_aer_reqs) { 3335 ctrlr->aer_req[i] = ctrlr->aer_req[ctrlr->nr_aer_reqs]; 3336 ctrlr->aer_req[ctrlr->nr_aer_reqs] = NULL; 3337 } 3338 3339 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3340 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 3341 _nvmf_request_complete(req); 3342 return true; 3343 } 3344 } 3345 3346 return false; 3347 } 3348 3349 void 3350 nvmf_qpair_abort_pending_zcopy_reqs(struct spdk_nvmf_qpair *qpair) 3351 { 3352 struct spdk_nvmf_request *req, *tmp; 3353 3354 TAILQ_FOREACH_SAFE(req, &qpair->outstanding, link, tmp) { 3355 if (req->zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE) { 3356 /* Zero-copy requests are kept on the outstanding queue from the moment 3357 * zcopy_start is sent until a zcopy_end callback is received. Therefore, 3358 * we can't remove them from the outstanding queue here, but need to rely on 3359 * the transport to do a zcopy_end to release their buffers and, in turn, 3360 * remove them from the queue. 
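 * Here we only mark the request as aborted and drop it via
 * nvmf_transport_req_free(); the outstanding-queue entry itself is released
 * when the transport later performs zcopy_end.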
3361 */ 3362 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3363 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 3364 nvmf_transport_req_free(req); 3365 } 3366 } 3367 } 3368 3369 static void 3370 nvmf_qpair_abort_request(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req) 3371 { 3372 uint16_t cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid; 3373 3374 if (nvmf_qpair_abort_aer(qpair, cid)) { 3375 SPDK_DEBUGLOG(nvmf, "abort ctrlr=%p sqid=%u cid=%u successful\n", 3376 qpair->ctrlr, qpair->qid, cid); 3377 req->rsp->nvme_cpl.cdw0 &= ~1U; /* Command successfully aborted */ 3378 3379 spdk_nvmf_request_complete(req); 3380 return; 3381 } 3382 3383 nvmf_transport_qpair_abort_request(qpair, req); 3384 } 3385 3386 static void 3387 nvmf_ctrlr_abort_done(struct spdk_io_channel_iter *i, int status) 3388 { 3389 struct spdk_nvmf_request *req = spdk_io_channel_iter_get_ctx(i); 3390 3391 if (status == 0) { 3392 /* There was no qpair whose ID matches SQID of the abort command. 3393 * Hence call _nvmf_request_complete() here. 3394 */ 3395 _nvmf_request_complete(req); 3396 } 3397 } 3398 3399 static void 3400 nvmf_ctrlr_abort_on_pg(struct spdk_io_channel_iter *i) 3401 { 3402 struct spdk_nvmf_request *req = spdk_io_channel_iter_get_ctx(i); 3403 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 3404 struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch); 3405 uint16_t sqid = req->cmd->nvme_cmd.cdw10_bits.abort.sqid; 3406 struct spdk_nvmf_qpair *qpair; 3407 3408 TAILQ_FOREACH(qpair, &group->qpairs, link) { 3409 if (qpair->ctrlr == req->qpair->ctrlr && qpair->qid == sqid) { 3410 /* Found the qpair */ 3411 3412 nvmf_qpair_abort_request(qpair, req); 3413 3414 /* Return -1 for the status so the iteration across threads stops. */ 3415 spdk_for_each_channel_continue(i, -1); 3416 return; 3417 } 3418 } 3419 3420 spdk_for_each_channel_continue(i, 0); 3421 } 3422 3423 static int 3424 nvmf_ctrlr_abort(struct spdk_nvmf_request *req) 3425 { 3426 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 3427 3428 rsp->cdw0 = 1U; /* Command not aborted */ 3429 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 3430 rsp->status.sc = SPDK_NVME_SC_SUCCESS; 3431 3432 /* Send a message to each poll group, searching for this ctrlr, sqid, and command. 
*/ 3433 spdk_for_each_channel(req->qpair->ctrlr->subsys->tgt, 3434 nvmf_ctrlr_abort_on_pg, 3435 req, 3436 nvmf_ctrlr_abort_done 3437 ); 3438 3439 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3440 } 3441 3442 int 3443 nvmf_ctrlr_abort_request(struct spdk_nvmf_request *req) 3444 { 3445 struct spdk_nvmf_request *req_to_abort = req->req_to_abort; 3446 struct spdk_bdev *bdev; 3447 struct spdk_bdev_desc *desc; 3448 struct spdk_io_channel *ch; 3449 int rc; 3450 3451 assert(req_to_abort != NULL); 3452 3453 if (g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_ABORT].hdlr && 3454 nvmf_qpair_is_admin_queue(req_to_abort->qpair)) { 3455 return g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_ABORT].hdlr(req); 3456 } 3457 3458 rc = spdk_nvmf_request_get_bdev(req_to_abort->cmd->nvme_cmd.nsid, req_to_abort, 3459 &bdev, &desc, &ch); 3460 if (rc != 0) { 3461 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3462 } 3463 3464 return spdk_nvmf_bdev_ctrlr_abort_cmd(bdev, desc, ch, req, req_to_abort); 3465 } 3466 3467 static int 3468 get_features_generic(struct spdk_nvmf_request *req, uint32_t cdw0) 3469 { 3470 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 3471 3472 rsp->cdw0 = cdw0; 3473 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3474 } 3475 3476 /* we have to use the typedef in the function declaration to appease astyle. */ 3477 typedef enum spdk_nvme_path_status_code spdk_nvme_path_status_code_t; 3478 3479 static spdk_nvme_path_status_code_t 3480 _nvme_ana_state_to_path_status(enum spdk_nvme_ana_state ana_state) 3481 { 3482 switch (ana_state) { 3483 case SPDK_NVME_ANA_INACCESSIBLE_STATE: 3484 return SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE; 3485 case SPDK_NVME_ANA_PERSISTENT_LOSS_STATE: 3486 return SPDK_NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS; 3487 case SPDK_NVME_ANA_CHANGE_STATE: 3488 return SPDK_NVME_SC_ASYMMETRIC_ACCESS_TRANSITION; 3489 default: 3490 return SPDK_NVME_SC_INTERNAL_PATH_ERROR; 3491 } 3492 } 3493 3494 static int 3495 nvmf_ctrlr_get_features(struct spdk_nvmf_request *req) 3496 { 3497 uint8_t feature; 3498 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 3499 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 3500 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 3501 enum spdk_nvme_ana_state ana_state; 3502 3503 feature = cmd->cdw10_bits.get_features.fid; 3504 3505 if (spdk_nvmf_subsystem_is_discovery(ctrlr->subsys)) { 3506 /* 3507 * Features supported by Discovery controller 3508 */ 3509 switch (feature) { 3510 case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER: 3511 return get_features_generic(req, ctrlr->feat.keep_alive_timer.raw); 3512 case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION: 3513 return get_features_generic(req, ctrlr->feat.async_event_configuration.raw); 3514 default: 3515 SPDK_INFOLOG(nvmf, "Get Features command with unsupported feature ID 0x%02x\n", feature); 3516 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 3517 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3518 } 3519 } 3520 /* 3521 * Process Get Features command for non-discovery controller 3522 */ 3523 ana_state = nvmf_ctrlr_get_ana_state_from_nsid(ctrlr, cmd->nsid); 3524 switch (ana_state) { 3525 case SPDK_NVME_ANA_INACCESSIBLE_STATE: 3526 case SPDK_NVME_ANA_PERSISTENT_LOSS_STATE: 3527 case SPDK_NVME_ANA_CHANGE_STATE: 3528 switch (feature) { 3529 case SPDK_NVME_FEAT_ERROR_RECOVERY: 3530 case SPDK_NVME_FEAT_WRITE_ATOMICITY: 3531 case SPDK_NVME_FEAT_HOST_RESERVE_MASK: 3532 case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST: 3533 response->status.sct = SPDK_NVME_SCT_PATH; 3534 response->status.sc = 
_nvme_ana_state_to_path_status(ana_state); 3535 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3536 default: 3537 break; 3538 } 3539 break; 3540 default: 3541 break; 3542 } 3543 3544 switch (feature) { 3545 case SPDK_NVME_FEAT_ARBITRATION: 3546 return get_features_generic(req, ctrlr->feat.arbitration.raw); 3547 case SPDK_NVME_FEAT_POWER_MANAGEMENT: 3548 return get_features_generic(req, ctrlr->feat.power_management.raw); 3549 case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD: 3550 return nvmf_ctrlr_get_features_temperature_threshold(req); 3551 case SPDK_NVME_FEAT_ERROR_RECOVERY: 3552 return get_features_generic(req, ctrlr->feat.error_recovery.raw); 3553 case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE: 3554 return get_features_generic(req, ctrlr->feat.volatile_write_cache.raw); 3555 case SPDK_NVME_FEAT_NUMBER_OF_QUEUES: 3556 return get_features_generic(req, ctrlr->feat.number_of_queues.raw); 3557 case SPDK_NVME_FEAT_INTERRUPT_COALESCING: 3558 return get_features_generic(req, ctrlr->feat.interrupt_coalescing.raw); 3559 case SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION: 3560 return nvmf_ctrlr_get_features_interrupt_vector_configuration(req); 3561 case SPDK_NVME_FEAT_WRITE_ATOMICITY: 3562 return get_features_generic(req, ctrlr->feat.write_atomicity.raw); 3563 case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION: 3564 return get_features_generic(req, ctrlr->feat.async_event_configuration.raw); 3565 case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER: 3566 return get_features_generic(req, ctrlr->feat.keep_alive_timer.raw); 3567 case SPDK_NVME_FEAT_HOST_IDENTIFIER: 3568 return nvmf_ctrlr_get_features_host_identifier(req); 3569 case SPDK_NVME_FEAT_HOST_RESERVE_MASK: 3570 return nvmf_ctrlr_get_features_reservation_notification_mask(req); 3571 case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST: 3572 return nvmf_ctrlr_get_features_reservation_persistence(req); 3573 case SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT: 3574 return nvmf_ctrlr_get_features_host_behavior_support(req); 3575 default: 3576 SPDK_INFOLOG(nvmf, "Get Features command with unsupported feature ID 0x%02x\n", feature); 3577 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 3578 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3579 } 3580 } 3581 3582 static int 3583 nvmf_ctrlr_set_features(struct spdk_nvmf_request *req) 3584 { 3585 uint8_t feature, save; 3586 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 3587 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 3588 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl; 3589 enum spdk_nvme_ana_state ana_state; 3590 /* 3591 * Features are not saveable by the controller as indicated by 3592 * ONCS field of the Identify Controller data. 
3593 * */ 3594 save = cmd->cdw10_bits.set_features.sv; 3595 if (save) { 3596 response->status.sc = SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE; 3597 response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 3598 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3599 } 3600 3601 feature = cmd->cdw10_bits.set_features.fid; 3602 3603 if (spdk_nvmf_subsystem_is_discovery(ctrlr->subsys)) { 3604 /* 3605 * Features supported by Discovery controller 3606 */ 3607 switch (feature) { 3608 case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER: 3609 return nvmf_ctrlr_set_features_keep_alive_timer(req); 3610 case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION: 3611 return nvmf_ctrlr_set_features_async_event_configuration(req); 3612 default: 3613 SPDK_INFOLOG(nvmf, "Set Features command with unsupported feature ID 0x%02x\n", feature); 3614 response->status.sc = SPDK_NVME_SC_INVALID_FIELD; 3615 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3616 } 3617 } 3618 /* 3619 * Process Set Features command for non-discovery controller 3620 */ 3621 ana_state = nvmf_ctrlr_get_ana_state_from_nsid(ctrlr, cmd->nsid); 3622 switch (ana_state) { 3623 case SPDK_NVME_ANA_INACCESSIBLE_STATE: 3624 case SPDK_NVME_ANA_CHANGE_STATE: 3625 if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) { 3626 response->status.sct = SPDK_NVME_SCT_PATH; 3627 response->status.sc = _nvme_ana_state_to_path_status(ana_state); 3628 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3629 } else { 3630 switch (feature) { 3631 case SPDK_NVME_FEAT_ERROR_RECOVERY: 3632 case SPDK_NVME_FEAT_WRITE_ATOMICITY: 3633 case SPDK_NVME_FEAT_HOST_RESERVE_MASK: 3634 case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST: 3635 response->status.sct = SPDK_NVME_SCT_PATH; 3636 response->status.sc = _nvme_ana_state_to_path_status(ana_state); 3637 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3638 default: 3639 break; 3640 } 3641 } 3642 break; 3643 case SPDK_NVME_ANA_PERSISTENT_LOSS_STATE: 3644 response->status.sct = SPDK_NVME_SCT_PATH; 3645 response->status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS; 3646 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3647 default: 3648 break; 3649 } 3650 3651 switch (feature) { 3652 case SPDK_NVME_FEAT_ARBITRATION: 3653 return nvmf_ctrlr_set_features_arbitration(req); 3654 case SPDK_NVME_FEAT_POWER_MANAGEMENT: 3655 return nvmf_ctrlr_set_features_power_management(req); 3656 case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD: 3657 return nvmf_ctrlr_set_features_temperature_threshold(req); 3658 case SPDK_NVME_FEAT_ERROR_RECOVERY: 3659 return nvmf_ctrlr_set_features_error_recovery(req); 3660 case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE: 3661 return nvmf_ctrlr_set_features_volatile_write_cache(req); 3662 case SPDK_NVME_FEAT_NUMBER_OF_QUEUES: 3663 return nvmf_ctrlr_set_features_number_of_queues(req); 3664 case SPDK_NVME_FEAT_INTERRUPT_COALESCING: 3665 response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 3666 response->status.sc = SPDK_NVME_SC_FEATURE_NOT_CHANGEABLE; 3667 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3668 case SPDK_NVME_FEAT_WRITE_ATOMICITY: 3669 return nvmf_ctrlr_set_features_write_atomicity(req); 3670 case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION: 3671 return nvmf_ctrlr_set_features_async_event_configuration(req); 3672 case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER: 3673 return nvmf_ctrlr_set_features_keep_alive_timer(req); 3674 case SPDK_NVME_FEAT_HOST_IDENTIFIER: 3675 return nvmf_ctrlr_set_features_host_identifier(req); 3676 case SPDK_NVME_FEAT_HOST_RESERVE_MASK: 3677 return nvmf_ctrlr_set_features_reservation_notification_mask(req); 3678 case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST: 3679 return 
nvmf_ctrlr_set_features_reservation_persistence(req);
3680 	case SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT:
3681 		return nvmf_ctrlr_set_features_host_behavior_support(req);
3682 	default:
3683 		SPDK_INFOLOG(nvmf, "Set Features command with unsupported feature ID 0x%02x\n", feature);
3684 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
3685 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3686 	}
3687 }
3688 
3689 static int
3690 nvmf_ctrlr_keep_alive(struct spdk_nvmf_request *req)
3691 {
3692 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
3693 
3694 	SPDK_DEBUGLOG(nvmf, "Keep Alive\n");
3695 	/*
3696 	 * To handle keep alive, just clear or reset the
3697 	 * ctrlr-based keep alive duration counter.
3698 	 * A separate timer-based process, when added, will
3699 	 * monitor whether the time since the last recorded
3700 	 * keep alive has exceeded the max duration and
3701 	 * take appropriate action.
3702 	 */
3703 	ctrlr->last_keep_alive_tick = spdk_get_ticks();
3704 
3705 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3706 }
3707 
3708 int
3709 nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
3710 {
3711 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
3712 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
3713 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
3714 	struct spdk_nvmf_subsystem_poll_group *sgroup;
3715 	int rc;
3716 
3717 	assert(ctrlr != NULL);
3718 	if (cmd->opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
3719 		/* We do not want to treat AERs as outstanding commands,
3720 		 * so decrement mgmt_io_outstanding here to offset
3721 		 * the increment that happened prior to this call.
3722 		 */
3723 		sgroup = &req->qpair->group->sgroups[ctrlr->subsys->id];
3724 		assert(sgroup != NULL);
3725 		sgroup->mgmt_io_outstanding--;
3726 	}
3727 
3728 	assert(spdk_get_thread() == ctrlr->thread);
3729 
3730 	if (cmd->fuse != 0) {
3731 		/* Fused admin commands are not supported. */
3732 		response->status.sct = SPDK_NVME_SCT_GENERIC;
3733 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
3734 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3735 	}
3736 
3737 	if (ctrlr->vcprop.cc.bits.en != 1) {
3738 		SPDK_ERRLOG("Admin command sent to disabled controller\n");
3739 		response->status.sct = SPDK_NVME_SCT_GENERIC;
3740 		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
3741 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
3742 	}
3743 
3744 	if (req->iovcnt && spdk_nvme_opc_get_data_transfer(cmd->opc) == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
3745 		spdk_iov_memset(req->iov, req->iovcnt, 0);
3746 	}
3747 
3748 	if (spdk_nvmf_subsystem_is_discovery(ctrlr->subsys)) {
3749 		/* Discovery controllers only support these admin opcodes. */
3750 		switch (cmd->opc) {
3751 		case SPDK_NVME_OPC_IDENTIFY:
3752 		case SPDK_NVME_OPC_GET_LOG_PAGE:
3753 		case SPDK_NVME_OPC_KEEP_ALIVE:
3754 		case SPDK_NVME_OPC_SET_FEATURES:
3755 		case SPDK_NVME_OPC_GET_FEATURES:
3756 		case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST:
3757 			break;
3758 		default:
3759 			goto invalid_opcode;
3760 		}
3761 	}
3762 
3763 	/* Call a custom admin cmd handler if set. Aborts take a different path (see nvmf_ctrlr_abort_request). */
3764 	if (g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].hdlr && cmd->opc != SPDK_NVME_OPC_ABORT) {
3765 		rc = g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].hdlr(req);
3766 		if (rc >= SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
3767 			/* The handler took care of this command */
3768 			return rc;
3769 		}
3770 	}
3771 
3772 	/* We only want to send passthrough admin commands to namespaces.
3773 	 * However, we don't want to pass through a command intended for all namespaces.
3774 */ 3775 if (ctrlr->subsys->passthrough && cmd->nsid && cmd->nsid != SPDK_NVME_GLOBAL_NS_TAG) { 3776 return nvmf_passthru_admin_cmd(req); 3777 } 3778 3779 switch (cmd->opc) { 3780 case SPDK_NVME_OPC_GET_LOG_PAGE: 3781 return nvmf_ctrlr_get_log_page(req); 3782 case SPDK_NVME_OPC_IDENTIFY: 3783 return nvmf_ctrlr_identify(req); 3784 case SPDK_NVME_OPC_ABORT: 3785 return nvmf_ctrlr_abort(req); 3786 case SPDK_NVME_OPC_GET_FEATURES: 3787 return nvmf_ctrlr_get_features(req); 3788 case SPDK_NVME_OPC_SET_FEATURES: 3789 return nvmf_ctrlr_set_features(req); 3790 case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST: 3791 return nvmf_ctrlr_async_event_request(req); 3792 case SPDK_NVME_OPC_KEEP_ALIVE: 3793 return nvmf_ctrlr_keep_alive(req); 3794 3795 case SPDK_NVME_OPC_CREATE_IO_SQ: 3796 case SPDK_NVME_OPC_CREATE_IO_CQ: 3797 case SPDK_NVME_OPC_DELETE_IO_SQ: 3798 case SPDK_NVME_OPC_DELETE_IO_CQ: 3799 /* Create and Delete I/O CQ/SQ not allowed in NVMe-oF */ 3800 goto invalid_opcode; 3801 3802 default: 3803 goto invalid_opcode; 3804 } 3805 3806 invalid_opcode: 3807 SPDK_INFOLOG(nvmf, "Unsupported admin opcode 0x%x\n", cmd->opc); 3808 response->status.sct = SPDK_NVME_SCT_GENERIC; 3809 response->status.sc = SPDK_NVME_SC_INVALID_OPCODE; 3810 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3811 } 3812 3813 static int 3814 nvmf_ctrlr_process_fabrics_cmd(struct spdk_nvmf_request *req) 3815 { 3816 struct spdk_nvmf_qpair *qpair = req->qpair; 3817 struct spdk_nvmf_capsule_cmd *cap_hdr; 3818 3819 cap_hdr = &req->cmd->nvmf_cmd; 3820 3821 if (qpair->ctrlr == NULL) { 3822 /* No ctrlr established yet; the only valid command is Connect */ 3823 assert(cap_hdr->fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT); 3824 return nvmf_ctrlr_cmd_connect(req); 3825 } else if (nvmf_qpair_is_admin_queue(qpair)) { 3826 /* 3827 * Controller session is established, and this is an admin queue. 3828 * Disallow Connect and allow other fabrics commands. 3829 */ 3830 switch (cap_hdr->fctype) { 3831 case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET: 3832 return nvmf_property_set(req); 3833 case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET: 3834 return nvmf_property_get(req); 3835 case SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND: 3836 case SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV: 3837 return nvmf_auth_request_exec(req); 3838 default: 3839 SPDK_DEBUGLOG(nvmf, "unknown fctype 0x%02x\n", 3840 cap_hdr->fctype); 3841 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3842 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE; 3843 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3844 } 3845 } else { 3846 /* 3847 * Controller session is established, and this is an I/O queue. 3848 * Disallow everything besides authentication commands. 
3849 */ 3850 switch (cap_hdr->fctype) { 3851 case SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND: 3852 case SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV: 3853 return nvmf_auth_request_exec(req); 3854 default: 3855 SPDK_DEBUGLOG(nvmf, "Unexpected I/O fctype 0x%x\n", cap_hdr->fctype); 3856 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3857 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE; 3858 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 3859 } 3860 } 3861 } 3862 3863 static inline void 3864 nvmf_ctrlr_queue_pending_async_event(struct spdk_nvmf_ctrlr *ctrlr, 3865 union spdk_nvme_async_event_completion *event) 3866 { 3867 struct spdk_nvmf_async_event_completion *nvmf_event; 3868 3869 nvmf_event = calloc(1, sizeof(*nvmf_event)); 3870 if (!nvmf_event) { 3871 SPDK_ERRLOG("Alloc nvmf event failed, ignore the event\n"); 3872 return; 3873 } 3874 nvmf_event->event.raw = event->raw; 3875 STAILQ_INSERT_TAIL(&ctrlr->async_events, nvmf_event, link); 3876 } 3877 3878 static inline int 3879 nvmf_ctrlr_async_event_notification(struct spdk_nvmf_ctrlr *ctrlr, 3880 union spdk_nvme_async_event_completion *event) 3881 { 3882 struct spdk_nvmf_request *req; 3883 struct spdk_nvme_cpl *rsp; 3884 3885 assert(spdk_get_thread() == ctrlr->thread); 3886 3887 /* If there is no outstanding AER request, queue the event. Then 3888 * if an AER is later submitted, this event can be sent as a 3889 * response. 3890 */ 3891 if (ctrlr->nr_aer_reqs == 0) { 3892 nvmf_ctrlr_queue_pending_async_event(ctrlr, event); 3893 return 0; 3894 } 3895 3896 req = ctrlr->aer_req[--ctrlr->nr_aer_reqs]; 3897 rsp = &req->rsp->nvme_cpl; 3898 3899 rsp->cdw0 = event->raw; 3900 3901 _nvmf_request_complete(req); 3902 ctrlr->aer_req[ctrlr->nr_aer_reqs] = NULL; 3903 3904 return 0; 3905 } 3906 3907 int 3908 nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr) 3909 { 3910 union spdk_nvme_async_event_completion event = {0}; 3911 3912 /* Users may disable the event notification */ 3913 if (!ctrlr->feat.async_event_configuration.bits.ns_attr_notice) { 3914 return 0; 3915 } 3916 3917 if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGE_MASK_BIT)) { 3918 return 0; 3919 } 3920 3921 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 3922 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 3923 event.bits.log_page_identifier = SPDK_NVME_LOG_CHANGED_NS_LIST; 3924 3925 return nvmf_ctrlr_async_event_notification(ctrlr, &event); 3926 } 3927 3928 int 3929 nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr) 3930 { 3931 union spdk_nvme_async_event_completion event = {0}; 3932 3933 /* Users may disable the event notification */ 3934 if (!ctrlr->feat.async_event_configuration.bits.ana_change_notice) { 3935 return 0; 3936 } 3937 3938 if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ANA_CHANGE_MASK_BIT)) { 3939 return 0; 3940 } 3941 3942 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 3943 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 3944 event.bits.log_page_identifier = SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS; 3945 3946 return nvmf_ctrlr_async_event_notification(ctrlr, &event); 3947 } 3948 3949 void 3950 nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr) 3951 { 3952 union spdk_nvme_async_event_completion event = {0}; 3953 3954 if (!ctrlr->num_avail_log_pages) { 3955 return; 3956 } 3957 3958 if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL_MASK_BIT)) { 3959 return; 3960 } 3961 3962 
event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_IO; 3963 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL; 3964 event.bits.log_page_identifier = SPDK_NVME_LOG_RESERVATION_NOTIFICATION; 3965 3966 nvmf_ctrlr_async_event_notification(ctrlr, &event); 3967 } 3968 3969 void 3970 nvmf_ctrlr_async_event_discovery_log_change_notice(void *ctx) 3971 { 3972 union spdk_nvme_async_event_completion event = {0}; 3973 struct spdk_nvmf_ctrlr *ctrlr = ctx; 3974 3975 /* Users may disable the event notification manually or 3976 * it may not be enabled due to keep alive timeout 3977 * not being set in connect command to discovery controller. 3978 */ 3979 if (!ctrlr->feat.async_event_configuration.bits.discovery_log_change_notice) { 3980 return; 3981 } 3982 3983 if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE_MASK_BIT)) { 3984 return; 3985 } 3986 3987 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 3988 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE; 3989 event.bits.log_page_identifier = SPDK_NVME_LOG_DISCOVERY; 3990 3991 nvmf_ctrlr_async_event_notification(ctrlr, &event); 3992 } 3993 3994 int 3995 spdk_nvmf_ctrlr_async_event_error_event(struct spdk_nvmf_ctrlr *ctrlr, 3996 enum spdk_nvme_async_event_info_error info) 3997 { 3998 union spdk_nvme_async_event_completion event; 3999 4000 if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ERROR_MASK_BIT)) { 4001 return 0; 4002 } 4003 4004 if (info > SPDK_NVME_ASYNC_EVENT_FW_IMAGE_LOAD) { 4005 return 0; 4006 } 4007 4008 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_ERROR; 4009 event.bits.log_page_identifier = SPDK_NVME_LOG_ERROR; 4010 event.bits.async_event_info = info; 4011 4012 return nvmf_ctrlr_async_event_notification(ctrlr, &event); 4013 } 4014 4015 void 4016 nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair) 4017 { 4018 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; 4019 int i; 4020 4021 if (ctrlr == NULL || !nvmf_qpair_is_admin_queue(qpair)) { 4022 return; 4023 } 4024 4025 assert(spdk_get_thread() == ctrlr->thread); 4026 4027 for (i = 0; i < ctrlr->nr_aer_reqs; i++) { 4028 spdk_nvmf_request_free(ctrlr->aer_req[i]); 4029 ctrlr->aer_req[i] = NULL; 4030 } 4031 4032 ctrlr->nr_aer_reqs = 0; 4033 } 4034 4035 void 4036 spdk_nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr) 4037 { 4038 struct spdk_nvmf_request *req; 4039 int i; 4040 4041 assert(spdk_get_thread() == ctrlr->thread); 4042 4043 if (!ctrlr->nr_aer_reqs) { 4044 return; 4045 } 4046 4047 for (i = 0; i < ctrlr->nr_aer_reqs; i++) { 4048 req = ctrlr->aer_req[i]; 4049 4050 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4051 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 4052 _nvmf_request_complete(req); 4053 4054 ctrlr->aer_req[i] = NULL; 4055 } 4056 4057 ctrlr->nr_aer_reqs = 0; 4058 } 4059 4060 static void 4061 _nvmf_ctrlr_add_reservation_log(void *ctx) 4062 { 4063 struct spdk_nvmf_reservation_log *log = (struct spdk_nvmf_reservation_log *)ctx; 4064 struct spdk_nvmf_ctrlr *ctrlr = log->ctrlr; 4065 4066 ctrlr->log_page_count++; 4067 4068 /* Maximum number of queued log pages is 255 */ 4069 if (ctrlr->num_avail_log_pages == 0xff) { 4070 struct spdk_nvmf_reservation_log *entry; 4071 entry = TAILQ_LAST(&ctrlr->log_head, log_page_head); 4072 entry->log.log_page_count = ctrlr->log_page_count; 4073 free(log); 4074 return; 4075 } 4076 4077 log->log.log_page_count = ctrlr->log_page_count; 4078 log->log.num_avail_log_pages = ctrlr->num_avail_log_pages++; 4079 
TAILQ_INSERT_TAIL(&ctrlr->log_head, log, link);
4080 
4081 	nvmf_ctrlr_async_event_reservation_notification(ctrlr);
4082 }
4083 
4084 void
4085 nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
4086 				  struct spdk_nvmf_ns *ns,
4087 				  enum spdk_nvme_reservation_notification_log_page_type type)
4088 {
4089 	struct spdk_nvmf_reservation_log *log;
4090 
4091 	switch (type) {
4092 	case SPDK_NVME_RESERVATION_LOG_PAGE_EMPTY:
4093 		return;
4094 	case SPDK_NVME_REGISTRATION_PREEMPTED:
4095 		if (ns->mask & SPDK_NVME_REGISTRATION_PREEMPTED_MASK) {
4096 			return;
4097 		}
4098 		break;
4099 	case SPDK_NVME_RESERVATION_RELEASED:
4100 		if (ns->mask & SPDK_NVME_RESERVATION_RELEASED_MASK) {
4101 			return;
4102 		}
4103 		break;
4104 	case SPDK_NVME_RESERVATION_PREEMPTED:
4105 		if (ns->mask & SPDK_NVME_RESERVATION_PREEMPTED_MASK) {
4106 			return;
4107 		}
4108 		break;
4109 	default:
4110 		return;
4111 	}
4112 
4113 	log = calloc(1, sizeof(*log));
4114 	if (!log) {
4115 		SPDK_ERRLOG("Alloc log page failed, ignore the log\n");
4116 		return;
4117 	}
4118 	log->ctrlr = ctrlr;
4119 	log->log.type = type;
4120 	log->log.nsid = ns->nsid;
4121 
4122 	spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_add_reservation_log, log);
4123 }
4124 
4125 /* Check the subsystem poll group's namespace information data structure */
4126 static bool
4127 nvmf_ns_info_ctrlr_is_registrant(struct spdk_nvmf_subsystem_pg_ns_info *ns_info,
4128 				 struct spdk_nvmf_ctrlr *ctrlr)
4129 {
4130 	uint32_t i;
4131 
4132 	for (i = 0; i < SPDK_NVMF_MAX_NUM_REGISTRANTS; i++) {
4133 		if (!spdk_uuid_compare(&ns_info->reg_hostid[i], &ctrlr->hostid)) {
4134 			return true;
4135 		}
4136 	}
4137 
4138 	return false;
4139 }
4140 
4141 /*
4142  * Check whether the NVMe command is permitted for the current controller (host).
4143  */
4144 static int
4145 nvmf_ns_reservation_request_check(struct spdk_nvmf_subsystem_pg_ns_info *ns_info,
4146 				  struct spdk_nvmf_ctrlr *ctrlr,
4147 				  struct spdk_nvmf_request *req)
4148 {
4149 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
4150 	enum spdk_nvme_reservation_type rtype = ns_info->rtype;
4151 	uint8_t status = SPDK_NVME_SC_SUCCESS;
4152 	uint8_t racqa;
4153 	bool is_registrant;
4154 
4155 	/* No valid reservation */
4156 	if (!rtype) {
4157 		return 0;
4158 	}
4159 
4160 	is_registrant = nvmf_ns_info_ctrlr_is_registrant(ns_info, ctrlr);
4161 	/* All-registrants reservation type and the current ctrlr is a valid registrant */
4162 	if ((rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS ||
4163 	     rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) && is_registrant) {
4164 		return 0;
4165 	} else if (!spdk_uuid_compare(&ns_info->holder_id, &ctrlr->hostid)) {
4166 		return 0;
4167 	}
4168 
4169 	/* The current controller is not the reservation holder */
4170 	switch (cmd->opc) {
4171 	case SPDK_NVME_OPC_READ:
4172 	case SPDK_NVME_OPC_COMPARE:
4173 		if (rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
4174 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
4175 			goto exit;
4176 		}
4177 		if ((rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY ||
4178 		     rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) && !is_registrant) {
4179 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
4180 		}
4181 		break;
4182 	case SPDK_NVME_OPC_FLUSH:
4183 	case SPDK_NVME_OPC_WRITE:
4184 	case SPDK_NVME_OPC_WRITE_UNCORRECTABLE:
4185 	case SPDK_NVME_OPC_WRITE_ZEROES:
4186 	case SPDK_NVME_OPC_DATASET_MANAGEMENT:
4187 		if (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE ||
4188 		    rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
4189 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
4190 			goto exit;
4191 		}
4192 		if (!is_registrant) {
4193 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
4194 		}
4195 		break;
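	/* Reservation commands are themselves gated: a plain acquire
	 * (RACQA == ACQUIRE) always conflicts for a non-holder, while the
	 * preempt and release variants additionally require the controller
	 * to be a registrant.
	 */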
4196 case SPDK_NVME_OPC_RESERVATION_ACQUIRE: 4197 racqa = cmd->cdw10_bits.resv_acquire.racqa; 4198 if (racqa == SPDK_NVME_RESERVE_ACQUIRE) { 4199 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 4200 goto exit; 4201 } 4202 if (!is_registrant) { 4203 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 4204 } 4205 break; 4206 case SPDK_NVME_OPC_RESERVATION_RELEASE: 4207 if (!is_registrant) { 4208 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 4209 } 4210 break; 4211 default: 4212 break; 4213 } 4214 4215 exit: 4216 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4217 req->rsp->nvme_cpl.status.sc = status; 4218 if (status == SPDK_NVME_SC_RESERVATION_CONFLICT) { 4219 return -EPERM; 4220 } 4221 4222 return 0; 4223 } 4224 4225 static int 4226 nvmf_ctrlr_process_io_fused_cmd(struct spdk_nvmf_request *req, struct spdk_bdev *bdev, 4227 struct spdk_bdev_desc *desc, struct spdk_io_channel *ch) 4228 { 4229 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 4230 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 4231 struct spdk_nvmf_request *first_fused_req = req->qpair->first_fused_req; 4232 int rc; 4233 4234 if (cmd->fuse == SPDK_NVME_CMD_FUSE_FIRST) { 4235 /* first fused operation (should be compare) */ 4236 if (first_fused_req != NULL) { 4237 struct spdk_nvme_cpl *fused_response = &first_fused_req->rsp->nvme_cpl; 4238 4239 SPDK_ERRLOG("Wrong sequence of fused operations\n"); 4240 4241 /* abort req->qpair->first_fused_request and continue with new fused command */ 4242 fused_response->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED; 4243 fused_response->status.sct = SPDK_NVME_SCT_GENERIC; 4244 _nvmf_request_complete(first_fused_req); 4245 } else if (cmd->opc != SPDK_NVME_OPC_COMPARE) { 4246 SPDK_ERRLOG("Wrong op code of fused operations\n"); 4247 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 4248 rsp->status.sc = SPDK_NVME_SC_INVALID_OPCODE; 4249 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 4250 } 4251 4252 req->qpair->first_fused_req = req; 4253 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 4254 } else if (cmd->fuse == SPDK_NVME_CMD_FUSE_SECOND) { 4255 /* second fused operation (should be write) */ 4256 if (first_fused_req == NULL) { 4257 SPDK_ERRLOG("Wrong sequence of fused operations\n"); 4258 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 4259 rsp->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED; 4260 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 4261 } else if (cmd->opc != SPDK_NVME_OPC_WRITE) { 4262 struct spdk_nvme_cpl *fused_response = &first_fused_req->rsp->nvme_cpl; 4263 4264 SPDK_ERRLOG("Wrong op code of fused operations\n"); 4265 4266 /* abort req->qpair->first_fused_request and fail current command */ 4267 fused_response->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED; 4268 fused_response->status.sct = SPDK_NVME_SCT_GENERIC; 4269 _nvmf_request_complete(first_fused_req); 4270 4271 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 4272 rsp->status.sc = SPDK_NVME_SC_INVALID_OPCODE; 4273 req->qpair->first_fused_req = NULL; 4274 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 4275 } 4276 4277 /* save request of first command to generate response later */ 4278 req->first_fused_req = first_fused_req; 4279 req->first_fused = true; 4280 req->qpair->first_fused_req = NULL; 4281 } else { 4282 SPDK_ERRLOG("Invalid fused command fuse field.\n"); 4283 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 4284 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 4285 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 4286 } 4287 4288 rc = nvmf_bdev_ctrlr_compare_and_write_cmd(bdev, desc, ch, req->first_fused_req, req); 4289 4290 if (rc == 
SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
4291 		if (spdk_nvme_cpl_is_error(rsp)) {
4292 			struct spdk_nvme_cpl *fused_response = &first_fused_req->rsp->nvme_cpl;
4293 
4294 			fused_response->status = rsp->status;
4295 			rsp->status.sct = SPDK_NVME_SCT_GENERIC;
4296 			rsp->status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
4297 			/* Complete the first of the fused commands. The second will be completed by the upper layer */
4298 			_nvmf_request_complete(first_fused_req);
4299 			req->first_fused_req = NULL;
4300 			req->first_fused = false;
4301 		}
4302 	}
4303 
4304 	return rc;
4305 }
4306 
4307 bool
4308 nvmf_ctrlr_use_zcopy(struct spdk_nvmf_request *req)
4309 {
4310 	struct spdk_nvmf_transport *transport = req->qpair->transport;
4311 	struct spdk_nvmf_ns *ns;
4312 
4313 	assert(req->zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
4314 
4315 	if (!transport->opts.zcopy) {
4316 		return false;
4317 	}
4318 
4319 	if (nvmf_qpair_is_admin_queue(req->qpair)) {
4320 		/* Admin queue */
4321 		return false;
4322 	}
4323 
4324 	if ((req->cmd->nvme_cmd.opc != SPDK_NVME_OPC_WRITE) &&
4325 	    (req->cmd->nvme_cmd.opc != SPDK_NVME_OPC_READ)) {
4326 		/* Not a READ or WRITE command */
4327 		return false;
4328 	}
4329 
4330 	if (req->cmd->nvme_cmd.fuse != SPDK_NVME_CMD_FUSE_NONE) {
4331 		/* Fused commands don't use zcopy buffers */
4332 		return false;
4333 	}
4334 
4335 	ns = nvmf_ctrlr_get_ns(req->qpair->ctrlr, req->cmd->nvme_cmd.nsid);
4336 	if (ns == NULL || ns->bdev == NULL || !ns->zcopy) {
4337 		return false;
4338 	}
4339 
4340 	req->zcopy_phase = NVMF_ZCOPY_PHASE_INIT;
4341 	return true;
4342 }
4343 
4344 void
4345 spdk_nvmf_request_zcopy_start(struct spdk_nvmf_request *req)
4346 {
4347 	assert(req->zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
4348 
4349 	/* Set iovcnt to be the maximum number of iovs that the ZCOPY can use */
4350 	req->iovcnt = NVMF_REQ_MAX_BUFFERS;
4351 
4352 	spdk_nvmf_request_exec(req);
4353 }
4354 
4355 void
4356 spdk_nvmf_request_zcopy_end(struct spdk_nvmf_request *req, bool commit)
4357 {
4358 	assert(req->zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
4359 	req->zcopy_phase = NVMF_ZCOPY_PHASE_END_PENDING;
4360 
4361 	nvmf_bdev_ctrlr_zcopy_end(req, commit);
4362 }
4363 
4364 int
4365 nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
4366 {
4367 	uint32_t nsid;
4368 	struct spdk_nvmf_ns *ns;
4369 	struct spdk_bdev *bdev;
4370 	struct spdk_bdev_desc *desc;
4371 	struct spdk_io_channel *ch;
4372 	struct spdk_nvmf_qpair *qpair = req->qpair;
4373 	struct spdk_nvmf_poll_group *group = qpair->group;
4374 	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
4375 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
4376 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
4377 	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
4378 	enum spdk_nvme_ana_state ana_state;
4379 
4380 	/* pre-set response details for this command */
4381 	response->status.sc = SPDK_NVME_SC_SUCCESS;
4382 	nsid = cmd->nsid;
4383 
4384 	assert(ctrlr != NULL);
4385 	if (spdk_unlikely(ctrlr->vcprop.cc.bits.en != 1)) {
4386 		SPDK_ERRLOG("I/O command sent to disabled controller\n");
4387 		response->status.sct = SPDK_NVME_SCT_GENERIC;
4388 		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
4389 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
4390 	}
4391 
4392 	ns = nvmf_ctrlr_get_ns(ctrlr, nsid);
4393 	if (spdk_unlikely(ns == NULL || ns->bdev == NULL)) {
4394 		SPDK_DEBUGLOG(nvmf, "Unsuccessful query for nsid %u\n", cmd->nsid);
4395 		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
4396 		response->status.dnr = 1;
4397 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
4398 	}
4399 
4400 	ana_state = nvmf_ctrlr_get_ana_state(ctrlr,
ns->anagrpid); 4401 if (spdk_unlikely(ana_state != SPDK_NVME_ANA_OPTIMIZED_STATE && 4402 ana_state != SPDK_NVME_ANA_NON_OPTIMIZED_STATE)) { 4403 SPDK_DEBUGLOG(nvmf, "Fail I/O command due to ANA state %d\n", 4404 ana_state); 4405 response->status.sct = SPDK_NVME_SCT_PATH; 4406 response->status.sc = _nvme_ana_state_to_path_status(ana_state); 4407 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 4408 } 4409 4410 if (spdk_likely(ctrlr->listener != NULL)) { 4411 SPDK_DTRACE_PROBE3_TICKS(nvmf_request_io_exec_path, req, 4412 ctrlr->listener->trid->traddr, 4413 ctrlr->listener->trid->trsvcid); 4414 } 4415 4416 /* scan-build falsely reporting dereference of null pointer */ 4417 assert(group != NULL && group->sgroups != NULL); 4418 ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[nsid - 1]; 4419 if (nvmf_ns_reservation_request_check(ns_info, ctrlr, req)) { 4420 SPDK_DEBUGLOG(nvmf, "Reservation Conflict for nsid %u, opcode %u\n", 4421 cmd->nsid, cmd->opc); 4422 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 4423 } 4424 4425 bdev = ns->bdev; 4426 desc = ns->desc; 4427 ch = ns_info->channel; 4428 4429 if (spdk_unlikely(cmd->fuse & SPDK_NVME_CMD_FUSE_MASK)) { 4430 return nvmf_ctrlr_process_io_fused_cmd(req, bdev, desc, ch); 4431 } else if (spdk_unlikely(qpair->first_fused_req != NULL)) { 4432 struct spdk_nvme_cpl *fused_response = &qpair->first_fused_req->rsp->nvme_cpl; 4433 4434 SPDK_ERRLOG("Second fused cmd expected - failing first one (cntlid:%u, qid:%u, opcode:0x%x)\n", 4435 ctrlr->cntlid, qpair->qid, 4436 req->qpair->first_fused_req->cmd->nvmf_cmd.opcode); 4437 4438 /* abort qpair->first_fused_request and continue with new command */ 4439 fused_response->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED; 4440 fused_response->status.sct = SPDK_NVME_SCT_GENERIC; 4441 _nvmf_request_complete(qpair->first_fused_req); 4442 qpair->first_fused_req = NULL; 4443 } 4444 4445 if (ctrlr->subsys->passthrough) { 4446 assert(ns->passthrough_nsid > 0); 4447 req->cmd->nvme_cmd.nsid = ns->passthrough_nsid; 4448 4449 return nvmf_bdev_ctrlr_nvme_passthru_io(bdev, desc, ch, req); 4450 } 4451 4452 if (spdk_nvmf_request_using_zcopy(req)) { 4453 assert(req->zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 4454 return nvmf_bdev_ctrlr_zcopy_start(bdev, desc, ch, req); 4455 } else { 4456 switch (cmd->opc) { 4457 case SPDK_NVME_OPC_READ: 4458 return nvmf_bdev_ctrlr_read_cmd(bdev, desc, ch, req); 4459 case SPDK_NVME_OPC_WRITE: 4460 return nvmf_bdev_ctrlr_write_cmd(bdev, desc, ch, req); 4461 case SPDK_NVME_OPC_FLUSH: 4462 return nvmf_bdev_ctrlr_flush_cmd(bdev, desc, ch, req); 4463 case SPDK_NVME_OPC_COMPARE: 4464 if (spdk_unlikely(!ctrlr->cdata.oncs.compare)) { 4465 goto invalid_opcode; 4466 } 4467 return nvmf_bdev_ctrlr_compare_cmd(bdev, desc, ch, req); 4468 case SPDK_NVME_OPC_WRITE_ZEROES: 4469 if (spdk_unlikely(!ctrlr->cdata.oncs.write_zeroes)) { 4470 goto invalid_opcode; 4471 } 4472 return nvmf_bdev_ctrlr_write_zeroes_cmd(bdev, desc, ch, req); 4473 case SPDK_NVME_OPC_DATASET_MANAGEMENT: 4474 if (spdk_unlikely(!ctrlr->cdata.oncs.dsm)) { 4475 goto invalid_opcode; 4476 } 4477 return nvmf_bdev_ctrlr_dsm_cmd(bdev, desc, ch, req); 4478 case SPDK_NVME_OPC_RESERVATION_REGISTER: 4479 case SPDK_NVME_OPC_RESERVATION_ACQUIRE: 4480 case SPDK_NVME_OPC_RESERVATION_RELEASE: 4481 case SPDK_NVME_OPC_RESERVATION_REPORT: 4482 if (spdk_unlikely(!ctrlr->cdata.oncs.reservations)) { 4483 goto invalid_opcode; 4484 } 4485 spdk_thread_send_msg(ctrlr->subsys->thread, nvmf_ns_reservation_request, req); 4486 return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 4487 
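		/* Like Compare, Write Zeroes, and DSM above, Copy is optional and
		 * is only dispatched when the matching ONCS bit is advertised by
		 * this controller.
		 */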
case SPDK_NVME_OPC_COPY:
4488 			if (spdk_unlikely(!ctrlr->cdata.oncs.copy)) {
4489 				goto invalid_opcode;
4490 			}
4491 			return nvmf_bdev_ctrlr_copy_cmd(bdev, desc, ch, req);
4492 		default:
4493 			if (spdk_unlikely(qpair->transport->opts.disable_command_passthru)) {
4494 				goto invalid_opcode;
4495 			}
4496 			if (ns->passthrough_nsid) {
4497 				req->cmd->nvme_cmd.nsid = ns->passthrough_nsid;
4498 			}
4499 			return nvmf_bdev_ctrlr_nvme_passthru_io(bdev, desc, ch, req);
4500 		}
4501 	}
4502 invalid_opcode:
4503 	SPDK_INFOLOG(nvmf, "Unsupported IO opcode 0x%x\n", cmd->opc);
4504 	response->status.sct = SPDK_NVME_SCT_GENERIC;
4505 	response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
4506 	response->status.dnr = 1;
4507 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
4508 }
4509 
4510 static void
4511 nvmf_qpair_request_cleanup(struct spdk_nvmf_qpair *qpair)
4512 {
4513 	if (spdk_unlikely(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING)) {
4514 		assert(qpair->state_cb != NULL);
4515 
4516 		if (TAILQ_EMPTY(&qpair->outstanding)) {
4517 			qpair->state_cb(qpair->state_cb_arg, 0);
4518 		}
4519 	}
4520 }
4521 
4522 int
4523 spdk_nvmf_request_free(struct spdk_nvmf_request *req)
4524 {
4525 	struct spdk_nvmf_qpair *qpair = req->qpair;
4526 
4527 	TAILQ_REMOVE(&qpair->outstanding, req, link);
4528 	if (spdk_unlikely(nvmf_transport_req_free(req))) {
4529 		SPDK_ERRLOG("Unable to free transport level request resources.\n");
4530 	}
4531 
4532 	nvmf_qpair_request_cleanup(qpair);
4533 
4534 	return 0;
4535 }
4536 
4537 static void
4538 _nvmf_request_complete(void *ctx)
4539 {
4540 	struct spdk_nvmf_request *req = ctx;
4541 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
4542 	struct spdk_nvmf_qpair *qpair;
4543 	struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;
4544 	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
4545 	bool is_aer = false;
4546 	uint32_t nsid;
4547 	bool paused;
4548 	uint8_t opcode;
4549 
4550 	rsp->sqid = 0;
4551 	rsp->status.p = 0;
4552 	rsp->cid = req->cmd->nvme_cmd.cid;
4553 	nsid = req->cmd->nvme_cmd.nsid;
4554 	opcode = req->cmd->nvmf_cmd.opcode;
4555 
4556 	qpair = req->qpair;
4557 	if (spdk_likely(qpair->ctrlr)) {
4558 		sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
4559 		assert(sgroup != NULL);
4560 		is_aer = req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
4561 		if (spdk_likely(qpair->qid != 0)) {
4562 			qpair->group->stat.completed_nvme_io++;
4563 		}
4564 
4565 		/*
4566 		 * Set the crd value.
4567 		 * If the IO has any error, and dnr (DoNotRetry) is not 1,
4568 		 * and ACRE is enabled, we will set the crd to 1 to select the first CRDT.
4569 */ 4570 if (spdk_unlikely(spdk_nvme_cpl_is_error(rsp) && 4571 rsp->status.dnr == 0 && 4572 qpair->ctrlr->acre_enabled)) { 4573 rsp->status.crd = 1; 4574 } 4575 } else if (spdk_unlikely(nvmf_request_is_fabric_connect(req))) { 4576 sgroup = nvmf_subsystem_pg_from_connect_cmd(req); 4577 } 4578 4579 if (SPDK_DEBUGLOG_FLAG_ENABLED("nvmf")) { 4580 spdk_nvme_print_completion(qpair->qid, rsp); 4581 } 4582 4583 switch (req->zcopy_phase) { 4584 case NVMF_ZCOPY_PHASE_NONE: 4585 TAILQ_REMOVE(&qpair->outstanding, req, link); 4586 break; 4587 case NVMF_ZCOPY_PHASE_INIT: 4588 if (spdk_unlikely(spdk_nvme_cpl_is_error(rsp))) { 4589 req->zcopy_phase = NVMF_ZCOPY_PHASE_INIT_FAILED; 4590 TAILQ_REMOVE(&qpair->outstanding, req, link); 4591 } else { 4592 req->zcopy_phase = NVMF_ZCOPY_PHASE_EXECUTE; 4593 } 4594 break; 4595 case NVMF_ZCOPY_PHASE_EXECUTE: 4596 break; 4597 case NVMF_ZCOPY_PHASE_END_PENDING: 4598 TAILQ_REMOVE(&qpair->outstanding, req, link); 4599 req->zcopy_phase = NVMF_ZCOPY_PHASE_COMPLETE; 4600 break; 4601 default: 4602 SPDK_ERRLOG("Invalid ZCOPY phase %u\n", req->zcopy_phase); 4603 break; 4604 } 4605 4606 if (spdk_unlikely(nvmf_transport_req_complete(req))) { 4607 SPDK_ERRLOG("Transport request completion error!\n"); 4608 } 4609 4610 /* AER cmd is an exception */ 4611 if (spdk_likely(sgroup && !is_aer)) { 4612 if (spdk_unlikely(opcode == SPDK_NVME_OPC_FABRIC || 4613 nvmf_qpair_is_admin_queue(qpair))) { 4614 assert(sgroup->mgmt_io_outstanding > 0); 4615 sgroup->mgmt_io_outstanding--; 4616 } else { 4617 if (req->zcopy_phase == NVMF_ZCOPY_PHASE_NONE || 4618 req->zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE || 4619 req->zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED) { 4620 /* End of request */ 4621 4622 /* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. 
*/
4623 			if (spdk_likely(nsid - 1 < sgroup->num_ns)) {
4624 				sgroup->ns_info[nsid - 1].io_outstanding--;
4625 			}
4626 		}
4627 	}
4628 
4629 	if (spdk_unlikely(sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSING &&
4630 			  sgroup->mgmt_io_outstanding == 0)) {
4631 		paused = true;
4632 		for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
4633 			ns_info = &sgroup->ns_info[nsid];
4634 
4635 			if (ns_info->state == SPDK_NVMF_SUBSYSTEM_PAUSING &&
4636 			    ns_info->io_outstanding > 0) {
4637 				paused = false;
4638 				break;
4639 			}
4640 		}
4641 
4642 		if (paused) {
4643 			sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
4644 			sgroup->cb_fn(sgroup->cb_arg, 0);
4645 			sgroup->cb_fn = NULL;
4646 			sgroup->cb_arg = NULL;
4647 		}
4648 	}
4649 
4650 	}
4651 
4652 	nvmf_qpair_request_cleanup(qpair);
4653 }
4654 
4655 int
4656 spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
4657 {
4658 	struct spdk_nvmf_qpair *qpair = req->qpair;
4659 
4660 	spdk_thread_exec_msg(qpair->group->thread, _nvmf_request_complete, req);
4661 
4662 	return 0;
4663 }
4664 
4665 SPDK_LOG_DEPRECATION_REGISTER(nvmf_request_exec_fabrics, "spdk_nvmf_request_exec_fabrics()",
4666 			      "v24.09", 1);
4667 void
4668 spdk_nvmf_request_exec_fabrics(struct spdk_nvmf_request *req)
4669 {
4670 	SPDK_LOG_DEPRECATED(nvmf_request_exec_fabrics);
4671 
4672 	return spdk_nvmf_request_exec(req);
4673 }
4674 
4675 static bool
4676 nvmf_check_subsystem_active(struct spdk_nvmf_request *req)
4677 {
4678 	struct spdk_nvmf_qpair *qpair = req->qpair;
4679 	struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;
4680 	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
4681 	uint32_t nsid;
4682 
4683 	if (spdk_likely(qpair->ctrlr)) {
4684 		sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
4685 		assert(sgroup != NULL);
4686 	} else if (spdk_unlikely(nvmf_request_is_fabric_connect(req))) {
4687 		sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
4688 	}
4689 
4690 	/* Check if the subsystem is paused (if there is a subsystem) */
4691 	if (spdk_unlikely(sgroup == NULL)) {
4692 		return true;
4693 	}
4694 
4695 	if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC ||
4696 			  nvmf_qpair_is_admin_queue(qpair))) {
4697 		if (sgroup->state != SPDK_NVMF_SUBSYSTEM_ACTIVE) {
4698 			/* The subsystem is not currently active. Queue this request. */
4699 			TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
4700 			return false;
4701 		}
4702 		sgroup->mgmt_io_outstanding++;
4703 	} else {
4704 		nsid = req->cmd->nvme_cmd.nsid;
4705 
4706 		/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
4707 		if (spdk_unlikely(nsid - 1 >= sgroup->num_ns)) {
4708 			req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4709 			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
4710 			req->rsp->nvme_cpl.status.dnr = 1;
4711 			TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
4712 			_nvmf_request_complete(req);
4713 			return false;
4714 		}
4715 
4716 		ns_info = &sgroup->ns_info[nsid - 1];
4717 		if (spdk_unlikely(ns_info->channel == NULL)) {
4718 			/* This can happen if the host sends I/O to a namespace that is
4719 			 * in the process of being added, but before the full addition
4720 			 * process is complete. Report an invalid namespace in that case.
4721 */ 4722 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4723 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT; 4724 req->rsp->nvme_cpl.status.dnr = 1; 4725 TAILQ_INSERT_TAIL(&qpair->outstanding, req, link); 4726 ns_info->io_outstanding++; 4727 _nvmf_request_complete(req); 4728 return false; 4729 } 4730 4731 if (spdk_unlikely(ns_info->state != SPDK_NVMF_SUBSYSTEM_ACTIVE)) { 4732 /* The namespace is not currently active. Queue this request. */ 4733 TAILQ_INSERT_TAIL(&sgroup->queued, req, link); 4734 return false; 4735 } 4736 4737 ns_info->io_outstanding++; 4738 } 4739 4740 return true; 4741 } 4742 4743 static bool 4744 nvmf_check_qpair_active(struct spdk_nvmf_request *req) 4745 { 4746 struct spdk_nvmf_qpair *qpair = req->qpair; 4747 int sc, sct; 4748 4749 if (spdk_likely(qpair->state == SPDK_NVMF_QPAIR_ENABLED)) { 4750 return true; 4751 } 4752 4753 sct = SPDK_NVME_SCT_GENERIC; 4754 sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR; 4755 4756 switch (qpair->state) { 4757 case SPDK_NVMF_QPAIR_CONNECTING: 4758 if (req->cmd->nvmf_cmd.opcode != SPDK_NVME_OPC_FABRIC) { 4759 SPDK_ERRLOG("Received command 0x%x on qid %u before CONNECT\n", 4760 req->cmd->nvmf_cmd.opcode, qpair->qid); 4761 break; 4762 } 4763 if (req->cmd->nvmf_cmd.fctype != SPDK_NVMF_FABRIC_COMMAND_CONNECT) { 4764 SPDK_ERRLOG("Received fctype 0x%x on qid %u before CONNECT\n", 4765 req->cmd->nvmf_cmd.fctype, qpair->qid); 4766 break; 4767 } 4768 return true; 4769 case SPDK_NVMF_QPAIR_AUTHENTICATING: 4770 sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 4771 sc = SPDK_NVMF_FABRIC_SC_AUTH_REQUIRED; 4772 if (req->cmd->nvmf_cmd.opcode != SPDK_NVME_OPC_FABRIC) { 4773 SPDK_ERRLOG("Received command 0x%x on qid %u before authentication\n", 4774 req->cmd->nvmf_cmd.opcode, qpair->qid); 4775 break; 4776 } 4777 if (req->cmd->nvmf_cmd.fctype != SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND && 4778 req->cmd->nvmf_cmd.fctype != SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV) { 4779 SPDK_ERRLOG("Received fctype 0x%x on qid %u before authentication\n", 4780 req->cmd->nvmf_cmd.fctype, qpair->qid); 4781 break; 4782 } 4783 return true; 4784 default: 4785 SPDK_ERRLOG("Received command 0x%x on qid %u in state %d\n", 4786 req->cmd->nvmf_cmd.opcode, qpair->qid, qpair->state); 4787 break; 4788 } 4789 4790 req->rsp->nvme_cpl.status.sct = sct; 4791 req->rsp->nvme_cpl.status.sc = sc; 4792 TAILQ_INSERT_TAIL(&qpair->outstanding, req, link); 4793 _nvmf_request_complete(req); 4794 4795 return false; 4796 } 4797 4798 void 4799 spdk_nvmf_request_exec(struct spdk_nvmf_request *req) 4800 { 4801 struct spdk_nvmf_qpair *qpair = req->qpair; 4802 enum spdk_nvmf_request_exec_status status; 4803 4804 if (spdk_unlikely(!nvmf_check_subsystem_active(req))) { 4805 return; 4806 } 4807 if (spdk_unlikely(!nvmf_check_qpair_active(req))) { 4808 return; 4809 } 4810 4811 if (SPDK_DEBUGLOG_FLAG_ENABLED("nvmf")) { 4812 spdk_nvme_print_command(qpair->qid, &req->cmd->nvme_cmd); 4813 } 4814 4815 /* Place the request on the outstanding list so we can keep track of it */ 4816 TAILQ_INSERT_TAIL(&qpair->outstanding, req, link); 4817 4818 if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC)) { 4819 status = nvmf_ctrlr_process_fabrics_cmd(req); 4820 } else if (spdk_unlikely(nvmf_qpair_is_admin_queue(qpair))) { 4821 status = nvmf_ctrlr_process_admin_cmd(req); 4822 } else { 4823 status = nvmf_ctrlr_process_io_cmd(req); 4824 } 4825 4826 if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) { 4827 _nvmf_request_complete(req); 4828 } 4829 } 4830 4831 static bool 4832 
nvmf_ctrlr_get_dif_ctx(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_cmd *cmd, 4833 struct spdk_dif_ctx *dif_ctx) 4834 { 4835 struct spdk_nvmf_ns *ns; 4836 struct spdk_bdev *bdev; 4837 4838 if (ctrlr == NULL || cmd == NULL) { 4839 return false; 4840 } 4841 4842 ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid); 4843 if (ns == NULL || ns->bdev == NULL) { 4844 return false; 4845 } 4846 4847 bdev = ns->bdev; 4848 4849 switch (cmd->opc) { 4850 case SPDK_NVME_OPC_READ: 4851 case SPDK_NVME_OPC_WRITE: 4852 case SPDK_NVME_OPC_COMPARE: 4853 return nvmf_bdev_ctrlr_get_dif_ctx(bdev, cmd, dif_ctx); 4854 default: 4855 break; 4856 } 4857 4858 return false; 4859 } 4860 4861 bool 4862 spdk_nvmf_request_get_dif_ctx(struct spdk_nvmf_request *req, struct spdk_dif_ctx *dif_ctx) 4863 { 4864 struct spdk_nvmf_qpair *qpair = req->qpair; 4865 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; 4866 4867 if (spdk_likely(ctrlr == NULL || !ctrlr->dif_insert_or_strip)) { 4868 return false; 4869 } 4870 4871 if (spdk_unlikely(!spdk_nvmf_qpair_is_active(qpair))) { 4872 return false; 4873 } 4874 4875 if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC)) { 4876 return false; 4877 } 4878 4879 if (spdk_unlikely(nvmf_qpair_is_admin_queue(qpair))) { 4880 return false; 4881 } 4882 4883 return nvmf_ctrlr_get_dif_ctx(ctrlr, &req->cmd->nvme_cmd, dif_ctx); 4884 } 4885 4886 void 4887 spdk_nvmf_set_custom_admin_cmd_hdlr(uint8_t opc, spdk_nvmf_custom_cmd_hdlr hdlr) 4888 { 4889 g_nvmf_custom_admin_cmd_hdlrs[opc].hdlr = hdlr; 4890 } 4891 4892 static int 4893 nvmf_passthru_admin_cmd_for_bdev_nsid(struct spdk_nvmf_request *req, uint32_t bdev_nsid) 4894 { 4895 struct spdk_bdev *bdev; 4896 struct spdk_bdev_desc *desc; 4897 struct spdk_io_channel *ch; 4898 struct spdk_nvmf_ns *ns; 4899 struct spdk_nvmf_ctrlr *ctrlr; 4900 struct spdk_nvme_cpl *response = spdk_nvmf_request_get_response(req); 4901 int rc; 4902 4903 rc = spdk_nvmf_request_get_bdev(bdev_nsid, req, &bdev, &desc, &ch); 4904 if (rc) { 4905 response->status.sct = SPDK_NVME_SCT_GENERIC; 4906 response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT; 4907 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 4908 } 4909 4910 ctrlr = req->qpair->ctrlr; 4911 ns = nvmf_ctrlr_get_ns(ctrlr, bdev_nsid); 4912 4913 if (ns->passthrough_nsid) { 4914 req->cmd->nvme_cmd.nsid = ns->passthrough_nsid; 4915 } 4916 4917 return spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev, desc, ch, req, NULL); 4918 } 4919 4920 static int 4921 nvmf_passthru_admin_cmd(struct spdk_nvmf_request *req) 4922 { 4923 struct spdk_nvme_cmd *cmd = spdk_nvmf_request_get_cmd(req); 4924 uint32_t bdev_nsid; 4925 4926 if (g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].nsid != 0) { 4927 bdev_nsid = g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].nsid; 4928 } else { 4929 bdev_nsid = cmd->nsid; 4930 } 4931 4932 return nvmf_passthru_admin_cmd_for_bdev_nsid(req, bdev_nsid); 4933 } 4934 4935 int 4936 nvmf_passthru_admin_cmd_for_ctrlr(struct spdk_nvmf_request *req, struct spdk_nvmf_ctrlr *ctrlr) 4937 { 4938 struct spdk_nvme_cpl *response = spdk_nvmf_request_get_response(req); 4939 struct spdk_nvmf_ns *ns; 4940 4941 ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); 4942 if (ns == NULL) { 4943 /* Is there a better sc to use here? 
*/ 4944 response->status.sct = SPDK_NVME_SCT_GENERIC; 4945 response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT; 4946 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 4947 } 4948 4949 return nvmf_passthru_admin_cmd_for_bdev_nsid(req, ns->nsid); 4950 } 4951 4952 void 4953 spdk_nvmf_set_passthru_admin_cmd(uint8_t opc, uint32_t forward_nsid) 4954 { 4955 g_nvmf_custom_admin_cmd_hdlrs[opc].hdlr = nvmf_passthru_admin_cmd; 4956 g_nvmf_custom_admin_cmd_hdlrs[opc].nsid = forward_nsid; 4957 } 4958 4959 int 4960 spdk_nvmf_request_get_bdev(uint32_t nsid, struct spdk_nvmf_request *req, 4961 struct spdk_bdev **bdev, struct spdk_bdev_desc **desc, struct spdk_io_channel **ch) 4962 { 4963 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 4964 struct spdk_nvmf_ns *ns; 4965 struct spdk_nvmf_poll_group *group = req->qpair->group; 4966 struct spdk_nvmf_subsystem_pg_ns_info *ns_info; 4967 4968 *bdev = NULL; 4969 *desc = NULL; 4970 *ch = NULL; 4971 4972 ns = nvmf_ctrlr_get_ns(ctrlr, nsid); 4973 if (ns == NULL || ns->bdev == NULL) { 4974 return -EINVAL; 4975 } 4976 4977 assert(group != NULL && group->sgroups != NULL); 4978 ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[nsid - 1]; 4979 *bdev = ns->bdev; 4980 *desc = ns->desc; 4981 *ch = ns_info->channel; 4982 4983 return 0; 4984 } 4985 4986 struct spdk_nvmf_ctrlr *spdk_nvmf_request_get_ctrlr(struct spdk_nvmf_request *req) 4987 { 4988 return req->qpair->ctrlr; 4989 } 4990 4991 struct spdk_nvme_cmd *spdk_nvmf_request_get_cmd(struct spdk_nvmf_request *req) 4992 { 4993 return &req->cmd->nvme_cmd; 4994 } 4995 4996 struct spdk_nvme_cpl *spdk_nvmf_request_get_response(struct spdk_nvmf_request *req) 4997 { 4998 return &req->rsp->nvme_cpl; 4999 } 5000 5001 struct spdk_nvmf_subsystem *spdk_nvmf_request_get_subsystem(struct spdk_nvmf_request *req) 5002 { 5003 return req->qpair->ctrlr->subsys; 5004 } 5005 5006 size_t 5007 spdk_nvmf_request_copy_from_buf(struct spdk_nvmf_request *req, 5008 void *buf, size_t buflen) 5009 { 5010 struct spdk_iov_xfer ix; 5011 5012 spdk_iov_xfer_init(&ix, req->iov, req->iovcnt); 5013 return spdk_iov_xfer_from_buf(&ix, buf, buflen); 5014 } 5015 5016 size_t 5017 spdk_nvmf_request_copy_to_buf(struct spdk_nvmf_request *req, 5018 void *buf, size_t buflen) 5019 { 5020 struct spdk_iov_xfer ix; 5021 5022 spdk_iov_xfer_init(&ix, req->iov, req->iovcnt); 5023 return spdk_iov_xfer_to_buf(&ix, buf, buflen); 5024 } 5025 5026 struct spdk_nvmf_subsystem *spdk_nvmf_ctrlr_get_subsystem(struct spdk_nvmf_ctrlr *ctrlr) 5027 { 5028 return ctrlr->subsys; 5029 } 5030 5031 uint16_t 5032 spdk_nvmf_ctrlr_get_id(struct spdk_nvmf_ctrlr *ctrlr) 5033 { 5034 return ctrlr->cntlid; 5035 } 5036 5037 struct spdk_nvmf_request *spdk_nvmf_request_get_req_to_abort(struct spdk_nvmf_request *req) 5038 { 5039 return req->req_to_abort; 5040 } 5041
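
/*
 * Usage sketch (illustrative only, kept out of the build): how a target
 * application might hook the custom admin command handling defined above.
 * The handler and opcode choices below are hypothetical examples; the
 * spdk_nvmf_* calls are the ones implemented in this file. A handler that
 * returns a value below SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE falls back
 * to the default handling in nvmf_ctrlr_process_admin_cmd().
 */
#if 0
static int
my_health_log_hdlr(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = spdk_nvmf_request_get_cmd(req);
	struct spdk_nvme_cpl *rsp = spdk_nvmf_request_get_response(req);
	struct spdk_nvme_health_information_page page = {0};

	if (cmd->cdw10_bits.get_log_page.lid != SPDK_NVME_LOG_HEALTH_INFORMATION) {
		/* Not ours - fall back to the default Get Log Page handling. */
		return -1;
	}

	/* Fill in an application-specific health page and copy it out. */
	spdk_nvmf_request_copy_from_buf(req, &page, sizeof(page));
	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static void
my_register_admin_hooks(void)
{
	/* Intercept Get Log Page on the admin queue. */
	spdk_nvmf_set_custom_admin_cmd_hdlr(SPDK_NVME_OPC_GET_LOG_PAGE, my_health_log_hdlr);
	/* Forward Security Receive to the bdev backing namespace 1. */
	spdk_nvmf_set_passthru_admin_cmd(SPDK_NVME_OPC_SECURITY_RECEIVE, 1);
}
#endif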