/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "nvme_internal.h"

#include "spdk/env.h"
#include "spdk/string.h"

static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
		struct nvme_async_event_request *aer);
static int nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns);
static int nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns);

static int
nvme_ctrlr_get_cc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc)
{
	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
					      &cc->raw);
}

static int
nvme_ctrlr_get_csts(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts)
{
	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts.raw),
					      &csts->raw);
}

int
nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
{
	return nvme_transport_ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
					      &cap->raw);
}

int
nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs)
{
	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, vs.raw),
					      &vs->raw);
}

static int
nvme_ctrlr_set_cc(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cc_register *cc)
{
	return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
					      cc->raw);
}

int
nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz)
{
	return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
					      &cmbsz->raw);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	char host_id_str[SPDK_UUID_STRING_LEN];

	assert(opts);

	memset(opts, 0, opts_size);

#define FIELD_OK(field) \
	offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size

	if (FIELD_OK(num_io_queues)) {
		opts->num_io_queues = DEFAULT_MAX_IO_QUEUES;
	}

	if (FIELD_OK(use_cmb_sqs)) {
		opts->use_cmb_sqs = true;
	}

	if (FIELD_OK(arb_mechanism)) {
		opts->arb_mechanism = SPDK_NVME_CC_AMS_RR;
	}

	if (FIELD_OK(keep_alive_timeout_ms)) {
		opts->keep_alive_timeout_ms = MIN_KEEP_ALIVE_TIMEOUT_IN_MS;
	}

	if (FIELD_OK(io_queue_size)) {
		opts->io_queue_size = DEFAULT_IO_QUEUE_SIZE;
	}

	if (FIELD_OK(io_queue_requests)) {
		opts->io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
	}

	if (FIELD_OK(host_id)) {
		memset(opts->host_id, 0, sizeof(opts->host_id));
	}

	if (nvme_driver_init() == 0) {
		if (FIELD_OK(extended_host_id)) {
			memcpy(opts->extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
			       sizeof(opts->extended_host_id));
		}

		if (FIELD_OK(hostnqn)) {
			spdk_uuid_fmt_lower(host_id_str, sizeof(host_id_str),
					    &g_spdk_nvme_driver->default_extended_host_id);
			snprintf(opts->hostnqn, sizeof(opts->hostnqn), "nqn.2014-08.org.nvmexpress:uuid:%s", host_id_str);
		}
	}

	if (FIELD_OK(src_addr)) {
		memset(opts->src_addr, 0, sizeof(opts->src_addr));
	}

	if (FIELD_OK(src_svcid)) {
		memset(opts->src_svcid, 0, sizeof(opts->src_svcid));
	}

	if (FIELD_OK(command_set)) {
		opts->command_set = SPDK_NVME_CC_CSS_NVM;
	}

	if (FIELD_OK(admin_timeout_ms)) {
		opts->admin_timeout_ms = NVME_MAX_TIMEOUT_PERIOD * 1000;
	}

	if (FIELD_OK(header_digest)) {
		opts->header_digest = false;
	}

	if (FIELD_OK(data_digest)) {
		opts->data_digest = false;
	}
#undef FIELD_OK
}
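
/*
 * Illustrative sketch (not part of this file): a typical caller fills the
 * options with the defaults above and then overrides only the fields it
 * cares about before attaching to a controller, e.g. via spdk_nvme_connect().
 * The opts_size argument is what lets older callers keep working when new
 * fields are appended to spdk_nvme_ctrlr_opts. The transport ID "trid" is
 * assumed to have been populated already.
 *
 *	struct spdk_nvme_ctrlr_opts opts;
 *	struct spdk_nvme_ctrlr *ctrlr;
 *
 *	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
 *	opts.num_io_queues = 4;			// override a default
 *	opts.keep_alive_timeout_ms = 10000;	// 10 second keep alive
 *	ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
 */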

/**
 * This function will be called when the process allocates the IO qpair.
 * Note: the ctrlr_lock must be held when calling this function.
 */
static void
nvme_ctrlr_proc_add_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr_process	*active_proc;
	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;

	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
	if (active_proc) {
		TAILQ_INSERT_TAIL(&active_proc->allocated_io_qpairs, qpair, per_process_tailq);
		qpair->active_proc = active_proc;
	}
}

/**
 * This function will be called when the process frees the IO qpair.
 * Note: the ctrlr_lock must be held when calling this function.
 */
static void
nvme_ctrlr_proc_remove_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr_process	*active_proc;
	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
	struct spdk_nvme_qpair		*active_qpair, *tmp_qpair;

	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
	if (!active_proc) {
		return;
	}

	TAILQ_FOREACH_SAFE(active_qpair, &active_proc->allocated_io_qpairs,
			   per_process_tailq, tmp_qpair) {
		if (active_qpair == qpair) {
			TAILQ_REMOVE(&active_proc->allocated_io_qpairs,
				     active_qpair, per_process_tailq);

			break;
		}
	}
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts,
		size_t opts_size)
{
	assert(ctrlr);

	assert(opts);

	memset(opts, 0, opts_size);

#define FIELD_OK(field) \
	offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(opts->field) <= opts_size

	if (FIELD_OK(qprio)) {
		opts->qprio = SPDK_NVME_QPRIO_URGENT;
	}

	if (FIELD_OK(io_queue_size)) {
		opts->io_queue_size = ctrlr->opts.io_queue_size;
	}

	if (FIELD_OK(io_queue_requests)) {
		opts->io_queue_requests = ctrlr->opts.io_queue_requests;
	}

#undef FIELD_OK
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	uint32_t				qid;
	struct spdk_nvme_qpair			*qpair;
	union spdk_nvme_cc_register		cc;
	struct spdk_nvme_io_qpair_opts		opts;

	if (!ctrlr) {
		return NULL;
	}

	/*
	 * Get the default options, then overwrite them with the user-provided options
	 * up to opts_size.
	 *
	 * This allows for extensions of the opts structure without breaking
	 * ABI compatibility.
	 */
	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	if (user_opts) {
		memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size));
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
		SPDK_ERRLOG("get_cc failed\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	/* Only the low 2 bits (values 0, 1, 2, 3) of QPRIO are valid. */
	if ((opts.qprio & 3) != opts.qprio) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	/*
	 * Only value SPDK_NVME_QPRIO_URGENT(0) is valid for the
	 * default round robin arbitration method.
	 */
	if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts.qprio != SPDK_NVME_QPRIO_URGENT)) {
		SPDK_ERRLOG("invalid queue priority for default round robin arbitration method\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	/*
	 * Get the first available I/O queue ID.
	 */
	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
	if (qid > ctrlr->opts.num_io_queues) {
		SPDK_ERRLOG("No free I/O queue IDs\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, &opts);
	if (qpair == NULL) {
		SPDK_ERRLOG("nvme_transport_ctrlr_create_io_qpair() failed\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}
	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	nvme_ctrlr_proc_add_io_qpair(qpair);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC) {
		spdk_delay_us(100);
	}

	return qpair;
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr;

	if (qpair == NULL) {
		return 0;
	}

	ctrlr = qpair->ctrlr;

	if (qpair->in_completion_context) {
		/*
		 * There are many cases where it is convenient to delete an io qpair in the context
		 *  of that qpair's completion routine.  To handle this properly, set a flag here
		 *  so that the completion routine will perform an actual delete after the context
		 *  unwinds.
		 */
		qpair->delete_after_completion_context = 1;
		return 0;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	nvme_ctrlr_proc_remove_io_qpair(qpair);

	TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
	spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);

	if (nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair)) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -1;
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return 0;
}
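
/*
 * Illustrative sketch (not part of this file): a typical caller requests an
 * I/O qpair with the controller-level defaults and overrides selected fields,
 * then frees it with spdk_nvme_ctrlr_free_io_qpair() before detaching the
 * controller.
 *
 *	struct spdk_nvme_io_qpair_opts qp_opts;
 *	struct spdk_nvme_qpair *qpair;
 *
 *	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &qp_opts, sizeof(qp_opts));
 *	qp_opts.io_queue_requests = 4096;	// allow more queued requests than the default
 *	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &qp_opts, sizeof(qp_opts));
 *	...
 *	spdk_nvme_ctrlr_free_io_qpair(qpair);
 */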

static void
nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_intel_log_page_directory *log_page_directory)
{
	if (log_page_directory == NULL) {
		return;
	}

	if (ctrlr->cdata.vid != SPDK_PCI_VID_INTEL) {
		return;
	}

	ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY] = true;

	if (log_page_directory->read_latency_log_len ||
	    (ctrlr->quirks & NVME_INTEL_QUIRK_READ_LATENCY)) {
		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] = true;
	}
	if (log_page_directory->write_latency_log_len ||
	    (ctrlr->quirks & NVME_INTEL_QUIRK_WRITE_LATENCY)) {
		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] = true;
	}
	if (log_page_directory->temperature_statistics_log_len) {
		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] = true;
	}
	if (log_page_directory->smart_log_len) {
		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_SMART] = true;
	}
	if (log_page_directory->marketing_description_log_len) {
		ctrlr->log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] = true;
	}
}

static int nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc = 0;
	uint64_t phys_addr = 0;
	struct nvme_completion_poll_status	status;
	struct spdk_nvme_intel_log_page_directory *log_page_directory;

	log_page_directory = spdk_zmalloc(sizeof(struct spdk_nvme_intel_log_page_directory),
					  64, &phys_addr, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (log_page_directory == NULL) {
		SPDK_ERRLOG("could not allocate log_page_directory\n");
		return -ENXIO;
	}

	rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
					      SPDK_NVME_GLOBAL_NS_TAG, log_page_directory,
					      sizeof(struct spdk_nvme_intel_log_page_directory),
					      0, nvme_completion_poll_cb, &status);
	if (rc != 0) {
		spdk_free(log_page_directory);
		return rc;
	}

	if (spdk_nvme_wait_for_completion_timeout(ctrlr->adminq, &status,
			ctrlr->opts.admin_timeout_ms / 1000)) {
		spdk_free(log_page_directory);
		SPDK_WARNLOG("Intel log pages not supported on Intel drive!\n");
		return 0;
	}

	nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, log_page_directory);
	spdk_free(log_page_directory);
	return 0;
}

static int
nvme_ctrlr_set_supported_log_pages(struct spdk_nvme_ctrlr *ctrlr)
{
	int	rc = 0;

	memset(ctrlr->log_page_supported, 0, sizeof(ctrlr->log_page_supported));
	/* Mandatory pages */
	ctrlr->log_page_supported[SPDK_NVME_LOG_ERROR] = true;
	ctrlr->log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] = true;
	ctrlr->log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] = true;
	if (ctrlr->cdata.lpa.celp) {
		ctrlr->log_page_supported[SPDK_NVME_LOG_COMMAND_EFFECTS_LOG] = true;
	}
	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL && !(ctrlr->quirks & NVME_INTEL_QUIRK_NO_LOG_PAGES)) {
		rc = nvme_ctrlr_set_intel_support_log_pages(ctrlr);
	}

	return rc;
}

static void
nvme_ctrlr_set_intel_supported_features(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_MAX_LBA] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_NATIVE_MAX_LBA] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_POWER_GOVERNOR_SETTING] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_SMBUS_ADDRESS] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LED_PATTERN] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_RESET_TIMED_WORKLOAD_COUNTERS] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING] = true;
}

static void
nvme_ctrlr_set_supported_features(struct spdk_nvme_ctrlr *ctrlr)
{
	memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported));
	/* Mandatory features */
	ctrlr->feature_supported[SPDK_NVME_FEAT_ARBITRATION] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_POWER_MANAGEMENT] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_ERROR_RECOVERY] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_NUMBER_OF_QUEUES] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_COALESCING] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_WRITE_ATOMICITY] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION] = true;
	/* Optional features */
	if (ctrlr->cdata.vwc.present) {
		ctrlr->feature_supported[SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE] = true;
	}
	if (ctrlr->cdata.apsta.supported) {
		ctrlr->feature_supported[SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION] = true;
	}
	if (ctrlr->cdata.hmpre) {
		ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_MEM_BUFFER] = true;
	}
	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) {
		nvme_ctrlr_set_intel_supported_features(ctrlr);
	}
}

void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
	/*
	 * Set the flag here and leave the actual failing of the qpairs to
	 * spdk_nvme_qpair_process_completions().
	 */
	if (hot_remove) {
		ctrlr->is_removed = true;
	}
	ctrlr->is_failed = true;
	SPDK_ERRLOG("ctrlr %s in failed state.\n", ctrlr->trid.traddr);
}

static void
nvme_ctrlr_shutdown(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_cc_register	cc;
	union spdk_nvme_csts_register	csts;
	uint32_t			ms_waited = 0;
	uint32_t			shutdown_timeout_ms;

	if (ctrlr->is_removed) {
		return;
	}

	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
		SPDK_ERRLOG("get_cc() failed\n");
		return;
	}

	cc.bits.shn = SPDK_NVME_SHN_NORMAL;

	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
		SPDK_ERRLOG("set_cc() failed\n");
		return;
	}

	/*
	 * The NVMe specification defines RTD3E to be the time between
	 *  setting SHN = 1 and the controller setting SHST = 10b.
	 * If the device doesn't report RTD3 entry latency, or if it
	 *  reports RTD3 entry latency less than 10 seconds, pick
	 *  10 seconds as a reasonable amount of time to
	 *  wait before proceeding.
	 */
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "RTD3E = %" PRIu32 " us\n", ctrlr->cdata.rtd3e);
	shutdown_timeout_ms = (ctrlr->cdata.rtd3e + 999) / 1000;
	shutdown_timeout_ms = spdk_max(shutdown_timeout_ms, 10000);
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "shutdown timeout = %" PRIu32 " ms\n", shutdown_timeout_ms);

	do {
		if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
			SPDK_ERRLOG("get_csts() failed\n");
			return;
		}

		if (csts.bits.shst == SPDK_NVME_SHST_COMPLETE) {
			SPDK_DEBUGLOG(SPDK_LOG_NVME, "shutdown complete in %u milliseconds\n",
				      ms_waited);
			ctrlr->is_shutdown = true;
			return;
		}

		nvme_delay(1000);
		ms_waited++;
	} while (ms_waited < shutdown_timeout_ms);

	SPDK_ERRLOG("did not shutdown within %u milliseconds\n", shutdown_timeout_ms);
}

static int
nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_cc_register	cc;
	int				rc;

	rc = nvme_transport_ctrlr_enable(ctrlr);
	if (rc != 0) {
		SPDK_ERRLOG("transport ctrlr_enable failed\n");
		return rc;
	}

	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
		SPDK_ERRLOG("get_cc() failed\n");
		return -EIO;
	}

	if (cc.bits.en != 0) {
		SPDK_ERRLOG("%s called with CC.EN = 1\n", __func__);
		return -EINVAL;
	}

	cc.bits.en = 1;
	cc.bits.css = 0;
	cc.bits.shn = 0;
	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

	/* Page size is 2 ^ (12 + mps). */
	cc.bits.mps = spdk_u32log2(ctrlr->page_size) - 12;

	if (ctrlr->cap.bits.css == 0) {
		SPDK_INFOLOG(SPDK_LOG_NVME,
			     "Drive reports no command sets supported. Assuming NVM is supported.\n");
		ctrlr->cap.bits.css = SPDK_NVME_CAP_CSS_NVM;
	}

	if (!(ctrlr->cap.bits.css & (1u << ctrlr->opts.command_set))) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Requested I/O command set %u but supported mask is 0x%x\n",
			      ctrlr->opts.command_set, ctrlr->cap.bits.css);
		return -EINVAL;
	}

	cc.bits.css = ctrlr->opts.command_set;

	switch (ctrlr->opts.arb_mechanism) {
	case SPDK_NVME_CC_AMS_RR:
		break;
	case SPDK_NVME_CC_AMS_WRR:
		if (SPDK_NVME_CAP_AMS_WRR & ctrlr->cap.bits.ams) {
			break;
		}
		return -EINVAL;
	case SPDK_NVME_CC_AMS_VS:
		if (SPDK_NVME_CAP_AMS_VS & ctrlr->cap.bits.ams) {
			break;
		}
		return -EINVAL;
	default:
		return -EINVAL;
	}

	cc.bits.ams = ctrlr->opts.arb_mechanism;

	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
		SPDK_ERRLOG("set_cc() failed\n");
		return -EIO;
	}

	return 0;
}
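
/*
 * Worked example for the CC encoding above (values follow from the NVMe
 * register definitions, not from anything specific to this driver):
 *
 *	page_size = 4096  ->  CC.MPS = log2(4096) - 12 = 0   (2^(12+0) = 4 KiB)
 *	page_size = 8192  ->  CC.MPS = log2(8192) - 12 = 1   (2^(12+1) = 8 KiB)
 *	CC.IOSQES = 6  ->  submission queue entry size 2^6 = 64 bytes
 *	CC.IOCQES = 4  ->  completion queue entry size 2^4 = 16 bytes
 */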

#ifdef DEBUG
static const char *
nvme_ctrlr_state_string(enum nvme_ctrlr_state state)
{
	switch (state) {
	case NVME_CTRLR_STATE_INIT_DELAY:
		return "delay init";
	case NVME_CTRLR_STATE_INIT:
		return "init";
	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
		return "disable and wait for CSTS.RDY = 1";
	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
		return "disable and wait for CSTS.RDY = 0";
	case NVME_CTRLR_STATE_ENABLE:
		return "enable controller by writing CC.EN = 1";
	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
		return "wait for CSTS.RDY = 1";
	case NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE:
		return "enable admin queue";
	case NVME_CTRLR_STATE_IDENTIFY:
		return "identify controller";
	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
		return "wait for identify controller";
	case NVME_CTRLR_STATE_SET_NUM_QUEUES:
		return "set number of queues";
	case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
		return "wait for set number of queues";
	case NVME_CTRLR_STATE_GET_NUM_QUEUES:
		return "get number of queues";
	case NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES:
		return "wait for get number of queues";
	case NVME_CTRLR_STATE_CONSTRUCT_NS:
		return "construct namespaces";
	case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
		return "identify active ns";
	case NVME_CTRLR_STATE_IDENTIFY_NS:
		return "identify ns";
	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
		return "wait for identify ns";
	case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
		return "identify namespace id descriptors";
	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
		return "wait for identify namespace id descriptors";
	case NVME_CTRLR_STATE_CONFIGURE_AER:
		return "configure AER";
	case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
		return "wait for configure aer";
	case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
		return "set supported log pages";
	case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
		return "set supported features";
	case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
		return "set doorbell buffer config";
	case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
		return "wait for doorbell buffer config";
	case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
		return "set keep alive timeout";
	case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
		return "wait for set keep alive timeout";
	case NVME_CTRLR_STATE_SET_HOST_ID:
		return "set host ID";
	case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
		return "wait for set host ID";
	case NVME_CTRLR_STATE_READY:
		return "ready";
	case NVME_CTRLR_STATE_ERROR:
		return "error";
	}
	return "unknown";
}
#endif /* DEBUG */

static void
nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
		     uint64_t timeout_in_ms)
{
	ctrlr->state = state;
	if (timeout_in_ms == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "setting state to %s (no timeout)\n",
			      nvme_ctrlr_state_string(ctrlr->state));
		ctrlr->state_timeout_tsc = NVME_TIMEOUT_INFINITE;
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "setting state to %s (timeout %" PRIu64 " ms)\n",
			      nvme_ctrlr_state_string(ctrlr->state), timeout_in_ms);
		ctrlr->state_timeout_tsc = spdk_get_ticks() + (timeout_in_ms * spdk_get_ticks_hz()) / 1000;
	}
}

static void
nvme_ctrlr_free_doorbell_buffer(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->shadow_doorbell) {
		spdk_dma_free(ctrlr->shadow_doorbell);
		ctrlr->shadow_doorbell = NULL;
	}

	if (ctrlr->eventidx) {
		spdk_dma_free(ctrlr->eventidx);
		ctrlr->eventidx = NULL;
	}
}

static void
nvme_ctrlr_set_doorbell_buffer_config_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_WARNLOG("Doorbell buffer config failed\n");
	} else {
		SPDK_INFOLOG(SPDK_LOG_NVME, "NVMe controller: %s doorbell buffer config enabled\n",
			     ctrlr->trid.traddr);
	}
	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
			     ctrlr->opts.admin_timeout_ms);
}

static int
nvme_ctrlr_set_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc = 0;
	uint64_t prp1, prp2;

	if (!ctrlr->cdata.oacs.doorbell_buffer_config) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
				     ctrlr->opts.admin_timeout_ms);
		return 0;
	}

	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
				     ctrlr->opts.admin_timeout_ms);
		return 0;
	}

	/* only 1 page size for doorbell buffer */
	ctrlr->shadow_doorbell = spdk_dma_zmalloc(ctrlr->page_size, ctrlr->page_size,
				 &prp1);
	if (ctrlr->shadow_doorbell == NULL) {
		rc = -ENOMEM;
		goto error;
	}

	ctrlr->eventidx = spdk_dma_zmalloc(ctrlr->page_size, ctrlr->page_size, &prp2);
	if (ctrlr->eventidx == NULL) {
		rc = -ENOMEM;
		goto error;
	}

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
			     ctrlr->opts.admin_timeout_ms);

	rc = nvme_ctrlr_cmd_doorbell_buffer_config(ctrlr, prp1, prp2,
			nvme_ctrlr_set_doorbell_buffer_config_done, ctrlr);
	if (rc != 0) {
		goto error;
	}

	return 0;

error:
	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
	nvme_ctrlr_free_doorbell_buffer(ctrlr);
	return rc;
}

int
spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc = 0;
	struct spdk_nvme_qpair *qpair;
	struct nvme_request *req, *tmp;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	if (ctrlr->is_resetting || ctrlr->is_failed) {
		/*
		 * Controller is already resetting or has failed.  Return
		 *  immediately since there is no need to kick off another
		 *  reset in these cases.
		 */
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return 0;
	}

	ctrlr->is_resetting = true;

	SPDK_NOTICELOG("resetting controller\n");

	/* Free all of the queued abort requests */
	STAILQ_FOREACH_SAFE(req, &ctrlr->queued_aborts, stailq, tmp) {
		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
		nvme_free_request(req);
		ctrlr->outstanding_aborts--;
	}

	/* Disable all queues before disabling the controller hardware. */
	nvme_qpair_disable(ctrlr->adminq);
	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
		nvme_qpair_disable(qpair);
	}

	/* Doorbell buffer config is invalid during reset */
	nvme_ctrlr_free_doorbell_buffer(ctrlr);

	/* Set the state back to INIT to cause a full hardware reset. */
	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);

	while (ctrlr->state != NVME_CTRLR_STATE_READY) {
		if (nvme_ctrlr_process_init(ctrlr) != 0) {
			SPDK_ERRLOG("%s: controller reinitialization failed\n", __func__);
			nvme_ctrlr_fail(ctrlr, false);
			rc = -1;
			break;
		}
	}

	if (!ctrlr->is_failed) {
		/* Reinitialize qpairs */
		TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
			if (nvme_transport_ctrlr_reinit_io_qpair(ctrlr, qpair) != 0) {
				nvme_ctrlr_fail(ctrlr, false);
				rc = -1;
			}
		}
	}

	ctrlr->is_resetting = false;

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
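
/*
 * Illustrative sketch (not part of this file): an application typically calls
 * spdk_nvme_ctrlr_reset() when a command appears stuck, e.g. from the timeout
 * callback it registered with spdk_nvme_ctrlr_register_timeout_callback().
 * The callback shape shown here is an assumption for the sketch; previously
 * allocated I/O qpairs are reinitialized by the reset and remain usable if
 * the reset succeeds.
 *
 *	static void
 *	app_timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
 *		       struct spdk_nvme_qpair *qpair, uint16_t cid)
 *	{
 *		if (spdk_nvme_ctrlr_reset(ctrlr) != 0) {
 *			// reset failed; the controller is now in the failed state
 *		}
 *	}
 */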

static void
nvme_ctrlr_identify_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_ERRLOG("nvme_identify_controller failed!\n");
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return;
	}

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 *  controller supports.
	 */
	ctrlr->max_xfer_size = nvme_transport_ctrlr_get_max_xfer_size(ctrlr);
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "transport max_xfer_size %u\n", ctrlr->max_xfer_size);
	if (ctrlr->cdata.mdts > 0) {
		ctrlr->max_xfer_size = spdk_min(ctrlr->max_xfer_size,
						ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size);
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "CNTLID 0x%04" PRIx16 "\n", ctrlr->cdata.cntlid);
	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
		ctrlr->cntlid = ctrlr->cdata.cntlid;
	} else {
		/*
		 * Fabrics controllers should already have CNTLID from the Connect command.
		 *
		 * If CNTLID from Connect doesn't match CNTLID in the Identify Controller data,
		 *  trust the one from Connect.
		 */
		if (ctrlr->cntlid != ctrlr->cdata.cntlid) {
			SPDK_DEBUGLOG(SPDK_LOG_NVME,
				      "Identify CNTLID 0x%04" PRIx16 " != Connect CNTLID 0x%04" PRIx16 "\n",
				      ctrlr->cdata.cntlid, ctrlr->cntlid);
		}
	}

	if (ctrlr->cdata.sgls.supported) {
		ctrlr->flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
		ctrlr->max_sges = nvme_transport_ctrlr_get_max_sges(ctrlr);
	}

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
			     ctrlr->opts.admin_timeout_ms);
}

static int
nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
{
	int	rc;

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
			     ctrlr->opts.admin_timeout_ms);

	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
				     &ctrlr->cdata, sizeof(ctrlr->cdata),
				     nvme_ctrlr_identify_done, ctrlr);
	if (rc != 0) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return rc;
	}

	return 0;
}

int
nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_completion_poll_status	status;
	int rc;
	uint32_t i;
	uint32_t num_pages;
	uint32_t next_nsid = 0;
	uint32_t *new_ns_list = NULL;

	if (ctrlr->num_ns == 0) {
		spdk_dma_free(ctrlr->active_ns_list);
		ctrlr->active_ns_list = NULL;

		return 0;
	}

	/*
	 * The allocated size must be a multiple of sizeof(struct spdk_nvme_ns_list)
	 */
	num_pages = (ctrlr->num_ns * sizeof(new_ns_list[0]) - 1) / sizeof(struct spdk_nvme_ns_list) + 1;
	new_ns_list = spdk_dma_zmalloc(num_pages * sizeof(struct spdk_nvme_ns_list), ctrlr->page_size,
				       NULL);
	if (!new_ns_list) {
		SPDK_ERRLOG("Failed to allocate active_ns_list!\n");
		return -ENOMEM;
	}

	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 1, 0) && !(ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		/*
		 * Iterate through the pages and fetch each chunk of 1024 namespaces until
		 * there are no more active namespaces
		 */
		for (i = 0; i < num_pages; i++) {
			rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST, 0, next_nsid,
						     &new_ns_list[1024 * i], sizeof(struct spdk_nvme_ns_list),
						     nvme_completion_poll_cb, &status);
			if (rc != 0) {
				goto fail;
			}
			if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
				SPDK_ERRLOG("nvme_ctrlr_cmd_identify_active_ns_list failed!\n");
				rc = -ENXIO;
				goto fail;
			}
			next_nsid = new_ns_list[1024 * i + 1023];
			if (next_nsid == 0) {
				/*
				 * No more active namespaces found, no need to fetch additional chunks
				 */
				break;
			}
		}

	} else {
		/*
		 * Controller doesn't support active ns list CNS 0x02 so dummy up
		 * an active ns list
		 */
		for (i = 0; i < ctrlr->num_ns; i++) {
			new_ns_list[i] = i + 1;
		}
	}

	/*
	 * Now that the list is properly set up, we can swap it in to the ctrlr and
	 * free up the previous one.
	 */
	spdk_dma_free(ctrlr->active_ns_list);
	ctrlr->active_ns_list = new_ns_list;

	return 0;
fail:
	spdk_dma_free(new_ns_list);
	return rc;
}

static void
nvme_ctrlr_identify_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
	uint32_t nsid;
	int rc;

	if (spdk_nvme_cpl_is_error(cpl)) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return;
	} else {
		nvme_ns_set_identify_data(ns);
	}

	/* move on to the next active NS */
	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
	if (ns == NULL) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
				     ctrlr->opts.admin_timeout_ms);
		return;
	}
	ns->ctrlr = ctrlr;
	ns->id = nsid;

	rc = nvme_ctrlr_identify_ns_async(ns);
	if (rc) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
	}
}

static int
nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns)
{
	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
	struct spdk_nvme_ns_data *nsdata;

	nsdata = &ctrlr->nsdata[ns->id - 1];

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
			     ctrlr->opts.admin_timeout_ms);
	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id,
				       nsdata, sizeof(*nsdata),
				       nvme_ctrlr_identify_ns_async_done, ns);
}

static int
nvme_ctrlr_identify_namespaces(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;
	struct spdk_nvme_ns *ns;
	int rc;

	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
	if (ns == NULL) {
		/* No active NS, move on to the next state */
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
				     ctrlr->opts.admin_timeout_ms);
		return 0;
	}

	ns->ctrlr = ctrlr;
	ns->id = nsid;

	rc = nvme_ctrlr_identify_ns_async(ns);
	if (rc) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
	}

	return rc;
}
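
/*
 * Illustrative sketch (not part of this file): the same first/next walk used
 * above is how an application enumerates the active namespaces of an attached
 * controller.
 *
 *	uint32_t nsid;
 *	struct spdk_nvme_ns *ns;
 *
 *	for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr); nsid != 0;
 *	     nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
 *		ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
 *		printf("nsid %u: %ju sectors\n", nsid,
 *		       (uintmax_t)spdk_nvme_ns_get_num_sectors(ns));
 *	}
 */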

static void
nvme_ctrlr_identify_id_desc_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
	uint32_t nsid;
	int rc;

	if (spdk_nvme_cpl_is_error(cpl)) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
				     ctrlr->opts.admin_timeout_ms);
		return;
	}

	/* move on to the next active NS */
	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
	if (ns == NULL) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
				     ctrlr->opts.admin_timeout_ms);
		return;
	}

	rc = nvme_ctrlr_identify_id_desc_async(ns);
	if (rc) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
	}
}

static int
nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns)
{
	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;

	memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
			     ctrlr->opts.admin_timeout_ms);
	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST,
				       0, ns->id, ns->id_desc_list, sizeof(ns->id_desc_list),
				       nvme_ctrlr_identify_id_desc_async_done, ns);
}

static int
nvme_ctrlr_identify_id_desc_namespaces(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;
	struct spdk_nvme_ns *ns;
	int rc;

	if (ctrlr->vs.raw < SPDK_NVME_VERSION(1, 3, 0) ||
	    (ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Version < 1.3; not attempting to retrieve NS ID Descriptor List\n");
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
				     ctrlr->opts.admin_timeout_ms);
		return 0;
	}

	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
	if (ns == NULL) {
		/* No active NS, move on to the next state */
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
				     ctrlr->opts.admin_timeout_ms);
		return 0;
	}

	rc = nvme_ctrlr_identify_id_desc_async(ns);
	if (rc) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
	}

	return rc;
}

static void
nvme_ctrlr_set_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_ERRLOG("Set Features - Number of Queues failed!\n");
	}
	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_GET_NUM_QUEUES,
			     ctrlr->opts.admin_timeout_ms);
}

static int
nvme_ctrlr_set_num_queues(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;

	if (ctrlr->opts.num_io_queues > SPDK_NVME_MAX_IO_QUEUES) {
		SPDK_NOTICELOG("Limiting requested num_io_queues %u to max %d\n",
			       ctrlr->opts.num_io_queues, SPDK_NVME_MAX_IO_QUEUES);
		ctrlr->opts.num_io_queues = SPDK_NVME_MAX_IO_QUEUES;
	} else if (ctrlr->opts.num_io_queues < 1) {
		SPDK_NOTICELOG("Requested num_io_queues 0, increasing to 1\n");
		ctrlr->opts.num_io_queues = 1;
	}

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
			     ctrlr->opts.admin_timeout_ms);

	rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->opts.num_io_queues,
					   nvme_ctrlr_set_num_queues_done, ctrlr);
	if (rc != 0) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return rc;
	}

	return 0;
}

static void
nvme_ctrlr_get_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	uint32_t cq_allocated, sq_allocated, min_allocated, i;
	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_ERRLOG("Get Features - Number of Queues failed!\n");
		ctrlr->opts.num_io_queues = 0;
	} else {
		/*
		 * Data in cdw0 is 0-based.
		 * Lower 16-bits indicate number of submission queues allocated.
		 * Upper 16-bits indicate number of completion queues allocated.
		 */
		sq_allocated = (cpl->cdw0 & 0xFFFF) + 1;
		cq_allocated = (cpl->cdw0 >> 16) + 1;

		/*
		 * For 1:1 queue mapping, set number of allocated queues to be minimum of
		 * submission and completion queues.
		 */
		min_allocated = spdk_min(sq_allocated, cq_allocated);

		/* Set number of queues to be minimum of requested and actually allocated. */
		ctrlr->opts.num_io_queues = spdk_min(min_allocated, ctrlr->opts.num_io_queues);
	}

	ctrlr->free_io_qids = spdk_bit_array_create(ctrlr->opts.num_io_queues + 1);
	if (ctrlr->free_io_qids == NULL) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return;
	}

	/* Initialize list of free I/O queue IDs. QID 0 is the admin queue. */
	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
	for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
		spdk_bit_array_set(ctrlr->free_io_qids, i);
	}
	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONSTRUCT_NS,
			     ctrlr->opts.admin_timeout_ms);
}

static int
nvme_ctrlr_get_num_queues(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES,
			     ctrlr->opts.admin_timeout_ms);

	/* Obtain the number of queues allocated using Get Features. */
	rc = nvme_ctrlr_cmd_get_num_queues(ctrlr, nvme_ctrlr_get_num_queues_done, ctrlr);
	if (rc != 0) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return rc;
	}

	return 0;
}
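
/*
 * Worked example for the Number of Queues decoding above (per the NVMe
 * Get Features definition, both counts in cdw0 are 0-based):
 *
 *	cdw0 = 0x003F001F  ->  SQs allocated = 0x001F + 1 = 32
 *	                       CQs allocated = 0x003F + 1 = 64
 *	                       min_allocated = 32, so at most 32 I/O qpairs
 *	                       (plus QID 0 for the admin queue) will be used.
 */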

static void
nvme_ctrlr_set_keep_alive_timeout_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	uint32_t keep_alive_interval_ms;
	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_ERRLOG("Keep alive timeout Get Feature failed: SC %x SCT %x\n",
			    cpl->status.sc, cpl->status.sct);
		ctrlr->opts.keep_alive_timeout_ms = 0;
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return;
	}

	if (ctrlr->opts.keep_alive_timeout_ms != cpl->cdw0) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Controller adjusted keep alive timeout to %u ms\n",
			      cpl->cdw0);
	}

	ctrlr->opts.keep_alive_timeout_ms = cpl->cdw0;

	keep_alive_interval_ms = ctrlr->opts.keep_alive_timeout_ms / 2;
	if (keep_alive_interval_ms == 0) {
		keep_alive_interval_ms = 1;
	}
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Sending keep alive every %u ms\n", keep_alive_interval_ms);

	ctrlr->keep_alive_interval_ticks = (keep_alive_interval_ms * spdk_get_ticks_hz()) / UINT64_C(1000);

	/* Schedule the first Keep Alive to be sent as soon as possible. */
	ctrlr->next_keep_alive_tick = spdk_get_ticks();
	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
			     ctrlr->opts.admin_timeout_ms);
}

static int
nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;

	if (ctrlr->opts.keep_alive_timeout_ms == 0) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
				     ctrlr->opts.admin_timeout_ms);
		return 0;
	}

	if (ctrlr->cdata.kas == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Controller KAS is 0 - not enabling Keep Alive\n");
		ctrlr->opts.keep_alive_timeout_ms = 0;
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
				     ctrlr->opts.admin_timeout_ms);
		return 0;
	}

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
			     ctrlr->opts.admin_timeout_ms);

	/* Retrieve actual keep alive timeout, since the controller may have adjusted it. */
	rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0,
					     nvme_ctrlr_set_keep_alive_timeout_done, ctrlr);
	if (rc != 0) {
		SPDK_ERRLOG("Keep alive timeout Get Feature failed: %d\n", rc);
		ctrlr->opts.keep_alive_timeout_ms = 0;
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return rc;
	}

	return 0;
}
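
/*
 * Illustrative sketch (not part of this file): keep alive commands are driven
 * from admin queue polling, so an application that enables keep alive must
 * call spdk_nvme_ctrlr_process_admin_completions() more often than the
 * (halved) interval computed above, e.g. from its main poller loop. "ctrlr"
 * and "qpair" are assumed to have been set up already.
 *
 *	while (app_is_running) {
 *		spdk_nvme_ctrlr_process_admin_completions(ctrlr);
 *		spdk_nvme_qpair_process_completions(qpair, 0);
 *	}
 */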

static void
nvme_ctrlr_set_host_id_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		/*
		 * Treat Set Features - Host ID failure as non-fatal, since the Host ID feature
		 * is optional.
		 */
		SPDK_WARNLOG("Set Features - Host ID failed: SC 0x%x SCT 0x%x\n",
			     cpl->status.sc, cpl->status.sct);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Set Features - Host ID was successful\n");
	}

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
}

static int
nvme_ctrlr_set_host_id(struct spdk_nvme_ctrlr *ctrlr)
{
	uint8_t *host_id;
	uint32_t host_id_size;
	int rc;

	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
		/*
		 * NVMe-oF sends the host ID during Connect and doesn't allow
		 * Set Features - Host Identifier after Connect, so we don't need to do anything here.
		 */
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "NVMe-oF transport - not sending Set Features - Host ID\n");
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
		return 0;
	}

	if (ctrlr->cdata.ctratt.host_id_exhid_supported) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Using 128-bit extended host identifier\n");
		host_id = ctrlr->opts.extended_host_id;
		host_id_size = sizeof(ctrlr->opts.extended_host_id);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Using 64-bit host identifier\n");
		host_id = ctrlr->opts.host_id;
		host_id_size = sizeof(ctrlr->opts.host_id);
	}

	/* If the user specified an all-zeroes host identifier, don't send the command. */
	if (spdk_mem_all_zero(host_id, host_id_size)) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME,
			      "User did not specify host ID - not sending Set Features - Host ID\n");
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
		return 0;
	}

	SPDK_LOGDUMP(SPDK_LOG_NVME, "host_id", host_id, host_id_size);

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
			     ctrlr->opts.admin_timeout_ms);

	rc = nvme_ctrlr_cmd_set_host_id(ctrlr, host_id, host_id_size, nvme_ctrlr_set_host_id_done, ctrlr);
	if (rc != 0) {
		SPDK_ERRLOG("Set Features - Host ID failed: %d\n", rc);
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return rc;
	}

	return 0;
}

static void
nvme_ctrlr_destruct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->ns) {
		uint32_t i, num_ns = ctrlr->num_ns;

		for (i = 0; i < num_ns; i++) {
			nvme_ns_destruct(&ctrlr->ns[i]);
		}

		spdk_free(ctrlr->ns);
		ctrlr->ns = NULL;
		ctrlr->num_ns = 0;
	}

	if (ctrlr->nsdata) {
		spdk_free(ctrlr->nsdata);
		ctrlr->nsdata = NULL;
	}

	spdk_dma_free(ctrlr->active_ns_list);
	ctrlr->active_ns_list = NULL;
}

static void
nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t i, nn = ctrlr->cdata.nn;
	struct spdk_nvme_ns_data *nsdata;

	for (i = 0; i < nn; i++) {
		struct spdk_nvme_ns	*ns = &ctrlr->ns[i];
		uint32_t		nsid = i + 1;
		nsdata = &ctrlr->nsdata[nsid - 1];

		if ((nsdata->ncap == 0) && spdk_nvme_ctrlr_is_active_ns(ctrlr, nsid)) {
			if (nvme_ns_construct(ns, nsid, ctrlr) != 0) {
				continue;
			}
		}

		if (nsdata->ncap && !spdk_nvme_ctrlr_is_active_ns(ctrlr, nsid)) {
			nvme_ns_destruct(ns);
		}
	}
}

static int
nvme_ctrlr_construct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc = 0;
	uint32_t nn = ctrlr->cdata.nn;
	uint64_t phys_addr = 0;

	/* ctrlr->num_ns may be 0 (startup) or a different number of namespaces (reset),
	 * so check if we need to reallocate.
	 */
	if (nn != ctrlr->num_ns) {
		nvme_ctrlr_destruct_namespaces(ctrlr);

		if (nn == 0) {
			SPDK_WARNLOG("controller has 0 namespaces\n");
			return 0;
		}

		ctrlr->ns = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns), 64,
					 &phys_addr, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
		if (ctrlr->ns == NULL) {
			rc = -ENOMEM;
			goto fail;
		}

		ctrlr->nsdata = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns_data), 64,
					     &phys_addr, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
		if (ctrlr->nsdata == NULL) {
			rc = -ENOMEM;
			goto fail;
		}

		ctrlr->num_ns = nn;
	}

	return 0;

fail:
	nvme_ctrlr_destruct_namespaces(ctrlr);
	return rc;
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_async_event_request	*aer = arg;
	struct spdk_nvme_ctrlr		*ctrlr = aer->ctrlr;
	struct spdk_nvme_ctrlr_process	*active_proc;
	union spdk_nvme_async_event_completion	event;
	int rc;

	if (cpl->status.sct == SPDK_NVME_SCT_GENERIC &&
	    cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) {
		/*
		 *  This is simulated when controller is being shut down, to
		 *  effectively abort outstanding asynchronous event requests
		 *  and make sure all memory is freed.  Do not repost the
		 *  request in this case.
		 */
		return;
	}

	if (cpl->status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
	    cpl->status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED) {
		/*
		 *  SPDK will only send as many AERs as the device says it supports,
		 *  so this status code indicates an out-of-spec device.  Do not repost
		 *  the request in this case.
		 */
		SPDK_ERRLOG("Controller appears out-of-spec for asynchronous event request\n"
			    "handling.  Do not repost this AER.\n");
		return;
	}

	event.raw = cpl->cdw0;
	if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
	    (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED)) {
		rc = nvme_ctrlr_identify_active_ns(ctrlr);
		if (rc) {
			return;
		}
		nvme_ctrlr_update_namespaces(ctrlr);
	}

	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
	if (active_proc && active_proc->aer_cb_fn) {
		active_proc->aer_cb_fn(active_proc->aer_cb_arg, cpl);
	}

	/* If the ctrlr is already shutdown, we should not send aer again */
	if (ctrlr->is_shutdown) {
		return;
	}

	/*
	 * Repost another asynchronous event request to replace the one
	 *  that just completed.
	 */
	if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) {
		/*
		 * We can't do anything to recover from a failure here,
		 * so just print a warning message and leave the AER unsubmitted.
		 */
		SPDK_ERRLOG("resubmitting AER failed!\n");
	}
}

static int
nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
				    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_async_event_cb, aer);
	aer->req = req;
	if (req == NULL) {
		return -1;
	}

	req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static void
nvme_ctrlr_configure_aer_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_async_event_request		*aer;
	int					rc;
	uint32_t				i;
	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_NOTICELOG("nvme_ctrlr_configure_aer failed!\n");
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
				     ctrlr->opts.admin_timeout_ms);
		return;
	}

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = spdk_min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		rc = nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
		if (rc) {
			SPDK_ERRLOG("nvme_ctrlr_construct_and_submit_aer failed!\n");
			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
			return;
		}
	}
	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
			     ctrlr->opts.admin_timeout_ms);
}

static int
nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_feat_async_event_configuration	config;
	int						rc;

	config.raw = 0;
	config.bits.crit_warn.bits.available_spare = 1;
	config.bits.crit_warn.bits.temperature = 1;
	config.bits.crit_warn.bits.device_reliability = 1;
	config.bits.crit_warn.bits.read_only = 1;
	config.bits.crit_warn.bits.volatile_memory_backup = 1;

	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 2, 0)) {
		if (ctrlr->cdata.oaes.ns_attribute_notices) {
			config.bits.ns_attr_notice = 1;
		}
		if (ctrlr->cdata.oaes.fw_activation_notices) {
			config.bits.fw_activation_notice = 1;
		}
	}
	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 3, 0) && ctrlr->cdata.lpa.telemetry) {
		config.bits.telemetry_log_notice = 1;
	}

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
			     ctrlr->opts.admin_timeout_ms);

	rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, config,
			nvme_ctrlr_configure_aer_done,
			ctrlr);
	if (rc != 0) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
		return rc;
	}

	return 0;
}
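
/*
 * Illustrative sketch (not part of this file): the per-process aer_cb_fn
 * invoked above is the callback an application registers with
 * spdk_nvme_ctrlr_register_aer_callback(); it receives the raw completion and
 * can decode the event from cdw0.
 *
 *	static void
 *	app_aer_cb(void *arg, const struct spdk_nvme_cpl *cpl)
 *	{
 *		union spdk_nvme_async_event_completion event;
 *
 *		event.raw = cpl->cdw0;
 *		printf("AER: type %u, info %u\n",
 *		       event.bits.async_event_type, event.bits.async_event_info);
 *	}
 *
 *	spdk_nvme_ctrlr_register_aer_callback(ctrlr, app_aer_cb, NULL);
 */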

struct spdk_nvme_ctrlr_process *
spdk_nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr, pid_t pid)
{
	struct spdk_nvme_ctrlr_process	*active_proc;

	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
		if (active_proc->pid == pid) {
			return active_proc;
		}
	}

	return NULL;
}

struct spdk_nvme_ctrlr_process *
spdk_nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_ctrlr_get_process(ctrlr, getpid());
}

/**
 * This function will be called when a process is using the controller.
 *  1. For the primary process, it is called when constructing the controller.
 *  2. For the secondary process, it is called when probing the controller.
 * Note: it checks whether the process has already been added, so the same
 *  process is never tracked twice.
 */
int
nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
{
	struct spdk_nvme_ctrlr_process	*ctrlr_proc;
	pid_t				pid = getpid();

	/* Check whether the process is already added or not */
	if (spdk_nvme_ctrlr_get_process(ctrlr, pid)) {
		return 0;
	}

	/* Initialize the per process properties for this ctrlr */
	ctrlr_proc = spdk_zmalloc(sizeof(struct spdk_nvme_ctrlr_process),
				  64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
	if (ctrlr_proc == NULL) {
		SPDK_ERRLOG("failed to allocate memory to track the process props\n");

		return -1;
	}

	ctrlr_proc->is_primary = spdk_process_is_primary();
	ctrlr_proc->pid = pid;
	STAILQ_INIT(&ctrlr_proc->active_reqs);
	ctrlr_proc->devhandle = devhandle;
	ctrlr_proc->ref = 0;
	TAILQ_INIT(&ctrlr_proc->allocated_io_qpairs);

	TAILQ_INSERT_TAIL(&ctrlr->active_procs, ctrlr_proc, tailq);

	return 0;
}

/**
 * This function will be called when the process detaches the controller.
 * Note: the ctrlr_lock must be held when calling this function.
 */
static void
nvme_ctrlr_remove_process(struct spdk_nvme_ctrlr *ctrlr,
			  struct spdk_nvme_ctrlr_process *proc)
{
	struct spdk_nvme_qpair	*qpair, *tmp_qpair;

	assert(STAILQ_EMPTY(&proc->active_reqs));

	TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	TAILQ_REMOVE(&ctrlr->active_procs, proc, tailq);

	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
		spdk_pci_device_detach(proc->devhandle);
	}

	spdk_dma_free(proc);
}

/**
 * This function will be called when a process has exited unexpectedly,
 *  in order to free any incomplete nvme requests, allocated IO qpairs
 *  and allocated memory.
 * Note: the ctrlr_lock must be held when calling this function.
 */
static void
nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc)
{
	struct nvme_request	*req, *tmp_req;
	struct spdk_nvme_qpair	*qpair, *tmp_qpair;

	STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) {
		STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq);

		assert(req->pid == proc->pid);

		nvme_free_request(req);
	}

	TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
		TAILQ_REMOVE(&proc->allocated_io_qpairs, qpair, per_process_tailq);

		/*
		 * The process may have been killed while some qpairs were in their
		 *  completion context.  Clear that flag here to allow these IO
		 *  qpairs to be deleted.
		 */
		qpair->in_completion_context = 0;

		qpair->no_deletion_notification_needed = 1;

		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	spdk_dma_free(proc);
}

/**
 * This function will be called when destructing the controller.
 *  1. There is no more admin request on this controller.
 *  2. Clean up any leftover resource allocations whose associated process is gone.
 */
void
nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;

	/* Free all the processes' properties and make sure no pending admin IOs */
	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
		TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);

		assert(STAILQ_EMPTY(&active_proc->active_reqs));

		spdk_free(active_proc);
	}
}

/**
 * This function will be called when any other process attaches or
 *  detaches the controller in order to cleanup those unexpectedly
 *  terminated processes.
 * Note: the ctrlr_lock must be held when calling this function.
 */
static int
nvme_ctrlr_remove_inactive_proc(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;
	int				active_proc_count = 0;

	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
		if ((kill(active_proc->pid, 0) == -1) && (errno == ESRCH)) {
			SPDK_ERRLOG("process %d terminated unexpectedly\n", active_proc->pid);

			TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);

			nvme_ctrlr_cleanup_process(active_proc);
		} else {
			active_proc_count++;
		}
	}

	return active_proc_count;
}

void
nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_ctrlr_process	*active_proc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	nvme_ctrlr_remove_inactive_proc(ctrlr);

	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
	if (active_proc) {
		active_proc->ref++;
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
}

void
nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_ctrlr_process	*active_proc;
	int				proc_count;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	proc_count = nvme_ctrlr_remove_inactive_proc(ctrlr);

	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
	if (active_proc) {
		active_proc->ref--;
		assert(active_proc->ref >= 0);

		/*
		 * The last active process will be removed at the end of
		 *  the destruction of the controller.
		 */
		if (active_proc->ref == 0 && proc_count != 1) {
			nvme_ctrlr_remove_process(ctrlr, active_proc);
		}
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
}

int
nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_ctrlr_process	*active_proc;
	int				ref = 0;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	nvme_ctrlr_remove_inactive_proc(ctrlr);

	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
		ref += active_proc->ref;
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return ref;
}

/**
 * Get the PCI device handle which is only visible to its associated process.
/**
 * Get the PCI device handle which is only visible to its associated process.
 */
struct spdk_pci_device *
nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_ctrlr_process *active_proc;
	struct spdk_pci_device *devhandle = NULL;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
	if (active_proc) {
		devhandle = active_proc->devhandle;
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return devhandle;
}

static void
nvme_ctrlr_enable_admin_queue(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_transport_qpair_reset(ctrlr->adminq);
	nvme_qpair_enable(ctrlr->adminq);
}
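/*
 * Illustrative sketch (not part of the driver): the devhandle returned above is
 * surfaced to applications through spdk_nvme_ctrlr_get_pci_device(), defined later
 * in this file. Assuming the spdk_pci_device_get_id() env API and its spdk_pci_id
 * fields, a caller could log the PCI IDs of a PCIe-attached controller like this;
 * the function name is hypothetical.
 */
static void
example_log_pci_ids(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_pci_device *dev = spdk_nvme_ctrlr_get_pci_device(ctrlr);

	if (dev != NULL) {
		struct spdk_pci_id id = spdk_pci_device_get_id(dev);

		SPDK_NOTICELOG("controller PCI IDs: %04x:%04x\n", id.vendor_id, id.device_id);
	}
}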
/**
 * This function will be called repeatedly during initialization until the controller is ready.
 */
int
nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_cc_register cc;
	union spdk_nvme_csts_register csts;
	uint32_t ready_timeout_in_ms;
	int rc = 0;

	/*
	 * May need to avoid accessing any register on the target controller
	 * for a while. Return early without touching the FSM.
	 * Check sleep_timeout_tsc > 0 for unit test.
	 */
	if ((ctrlr->sleep_timeout_tsc > 0) &&
	    (spdk_get_ticks() <= ctrlr->sleep_timeout_tsc)) {
		return 0;
	}
	ctrlr->sleep_timeout_tsc = 0;

	if (nvme_ctrlr_get_cc(ctrlr, &cc) ||
	    nvme_ctrlr_get_csts(ctrlr, &csts)) {
		if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
			/* While a device is resetting, it may be unable to service MMIO reads
			 * temporarily. Allow for this case.
			 */
			SPDK_ERRLOG("Get registers failed while waiting for CSTS.RDY == 0\n");
			goto init_timeout;
		}
		SPDK_ERRLOG("Failed to read CC and CSTS in state %d\n", ctrlr->state);
		nvme_ctrlr_fail(ctrlr, false);
		return -EIO;
	}

	ready_timeout_in_ms = 500 * ctrlr->cap.bits.to;

	/*
	 * Check if the current initialization step is done or has timed out.
	 */
	switch (ctrlr->state) {
	case NVME_CTRLR_STATE_INIT_DELAY:
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, ready_timeout_in_ms);
		/*
		 * Controller may need some delay before it's enabled.
		 *
		 * This is a workaround for an issue where the PCIe-attached NVMe controller
		 * is not ready after VFIO reset. We delay the initialization rather than the
		 * enabling itself, because this is required only for the very first enabling
		 * - directly after a VFIO reset.
		 *
		 * TODO: Figure out what is actually going wrong.
		 */
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Adding 2 second delay before initializing the controller\n");
		ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2000 * spdk_get_ticks_hz() / 1000);
		break;

	case NVME_CTRLR_STATE_INIT:
		/* Begin the hardware initialization by making sure the controller is disabled. */
		if (cc.bits.en) {
			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1\n");
			/*
			 * Controller is currently enabled. We need to disable it to cause a reset.
			 *
			 * If CC.EN = 1 && CSTS.RDY = 0, the controller is in the process of becoming ready.
			 * Wait for the ready bit to be 1 before disabling the controller.
			 */
			if (csts.bits.rdy == 0) {
				SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 0 - waiting for reset to complete\n");
				nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1, ready_timeout_in_ms);
				return 0;
			}

			/* CC.EN = 1 && CSTS.RDY == 1, so we can immediately disable the controller. */
			SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 0\n");
			cc.bits.en = 0;
			if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
				SPDK_ERRLOG("set_cc() failed\n");
				nvme_ctrlr_fail(ctrlr, false);
				return -EIO;
			}
			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);

			/*
			 * Wait 2.5 seconds before accessing PCI registers.
			 * Not using sleep() to avoid blocking other controllers' initialization.
			 */
			if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) {
				SPDK_DEBUGLOG(SPDK_LOG_NVME, "Applying quirk: delay 2.5 seconds before reading registers\n");
				ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2500 * spdk_get_ticks_hz() / 1000);
			}
			return 0;
		} else {
			if (csts.bits.rdy == 1) {
				SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 0 && CSTS.RDY = 1 - waiting for shutdown to complete\n");
			}

			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
			return 0;
		}
		break;

	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
		if (csts.bits.rdy == 1) {
			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 1 - disabling controller\n");
			/* CC.EN = 1 && CSTS.RDY = 1, so we can set CC.EN = 0 now. */
			SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 0\n");
			cc.bits.en = 0;
			if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
				SPDK_ERRLOG("set_cc() failed\n");
				nvme_ctrlr_fail(ctrlr, false);
				return -EIO;
			}
			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
			return 0;
		}
		break;

	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
		if (csts.bits.rdy == 0) {
			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 0 && CSTS.RDY = 0\n");
			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE, ready_timeout_in_ms);
			/*
			 * Delay 100us before setting CC.EN = 1. Some NVMe SSDs miss CC.EN getting
			 * set to 1 if it is too soon after CSTS.RDY is reported as 0.
			 */
			spdk_delay_us(100);
			return 0;
		}
		break;

	case NVME_CTRLR_STATE_ENABLE:
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Setting CC.EN = 1\n");
		rc = nvme_ctrlr_enable(ctrlr);
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1, ready_timeout_in_ms);
		return rc;

	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
		if (csts.bits.rdy == 1) {
			SPDK_DEBUGLOG(SPDK_LOG_NVME, "CC.EN = 1 && CSTS.RDY = 1 - controller is ready\n");
			/*
			 * The controller has been enabled.
			 * Perform the rest of initialization serially.
			 */
			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE,
					     ctrlr->opts.admin_timeout_ms);
			return 0;
		}
		break;

	case NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE:
		nvme_ctrlr_enable_admin_queue(ctrlr);
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY,
				     ctrlr->opts.admin_timeout_ms);
		break;

	case NVME_CTRLR_STATE_IDENTIFY:
		rc = nvme_ctrlr_identify(ctrlr);
		break;

	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		break;

	case NVME_CTRLR_STATE_SET_NUM_QUEUES:
		rc = nvme_ctrlr_set_num_queues(ctrlr);
		break;

	case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		break;

	case NVME_CTRLR_STATE_GET_NUM_QUEUES:
		rc = nvme_ctrlr_get_num_queues(ctrlr);
		break;

	case NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES:
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		break;

	case NVME_CTRLR_STATE_CONSTRUCT_NS:
		rc = nvme_ctrlr_construct_namespaces(ctrlr);
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
				     ctrlr->opts.admin_timeout_ms);
		break;

	case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
		rc = nvme_ctrlr_identify_active_ns(ctrlr);
		if (rc < 0) {
			nvme_ctrlr_destruct_namespaces(ctrlr);
		}
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS,
				     ctrlr->opts.admin_timeout_ms);
		break;

	case NVME_CTRLR_STATE_IDENTIFY_NS:
		rc = nvme_ctrlr_identify_namespaces(ctrlr);
		break;

	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		break;

	case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
		rc = nvme_ctrlr_identify_id_desc_namespaces(ctrlr);
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
				     ctrlr->opts.admin_timeout_ms);
		break;

	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		break;

	case NVME_CTRLR_STATE_CONFIGURE_AER:
		rc = nvme_ctrlr_configure_aer(ctrlr);
		break;

	case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		break;

	case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
		rc = nvme_ctrlr_set_supported_log_pages(ctrlr);
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
				     ctrlr->opts.admin_timeout_ms);
		break;

	case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
		nvme_ctrlr_set_supported_features(ctrlr);
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
				     ctrlr->opts.admin_timeout_ms);
		break;

	case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
		rc = nvme_ctrlr_set_doorbell_buffer_config(ctrlr);
		break;

	case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		break;

	case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
		rc = nvme_ctrlr_set_keep_alive_timeout(ctrlr);
		break;

	case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		break;

	case NVME_CTRLR_STATE_SET_HOST_ID:
		rc = nvme_ctrlr_set_host_id(ctrlr);
		break;

	case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		break;

	case NVME_CTRLR_STATE_READY:
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Ctrlr already in ready state\n");
		return 0;

	case NVME_CTRLR_STATE_ERROR:
		SPDK_ERRLOG("Ctrlr %s is in error state\n", ctrlr->trid.traddr);
		return -1;

	default:
		assert(0);
		nvme_ctrlr_fail(ctrlr, false);
		return -1;
	}

init_timeout:
	if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE &&
	    spdk_get_ticks() > ctrlr->state_timeout_tsc) {
		SPDK_ERRLOG("Initialization timed out in state %d\n", ctrlr->state);
		nvme_ctrlr_fail(ctrlr, false);
		return -1;
	}

	return rc;
}

int
nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx)
{
	pthread_mutexattr_t attr;
	int rc = 0;

	if (pthread_mutexattr_init(&attr)) {
		return -1;
	}
	if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) ||
#ifndef __FreeBSD__
	    pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST) ||
	    pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
#endif
	    pthread_mutex_init(mtx, &attr)) {
		rc = -1;
	}
	pthread_mutexattr_destroy(&attr);
	return rc;
}

int
nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;

	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT_DELAY, NVME_TIMEOUT_INFINITE);
	} else {
		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
	}

	ctrlr->flags = 0;
	ctrlr->free_io_qids = NULL;
	ctrlr->is_resetting = false;
	ctrlr->is_failed = false;
	ctrlr->is_shutdown = false;

	TAILQ_INIT(&ctrlr->active_io_qpairs);
	STAILQ_INIT(&ctrlr->queued_aborts);
	ctrlr->outstanding_aborts = 0;

	rc = nvme_robust_mutex_init_recursive_shared(&ctrlr->ctrlr_lock);
	if (rc != 0) {
		return rc;
	}

	TAILQ_INIT(&ctrlr->active_procs);

	return rc;
}
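/*
 * Illustrative sketch (not part of the driver): how an attach path might drive
 * nvme_ctrlr_process_init() as a non-blocking state machine, assuming the caller
 * simply polls until ctrlr->state reaches NVME_CTRLR_STATE_READY or a step fails.
 * Real transport code may interleave this with other work instead of spinning;
 * the function name is hypothetical.
 */
static int
example_init_poll_loop(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;

	while (ctrlr->state != NVME_CTRLR_STATE_READY) {
		/* Each call performs at most one initialization step. */
		rc = nvme_ctrlr_process_init(ctrlr);
		if (rc) {
			SPDK_ERRLOG("controller initialization failed\n");
			return rc;
		}
	}

	return 0;
}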
/* This function should be called once at ctrlr initialization to set up constant properties. */
void
nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_register *cap,
		    const union spdk_nvme_vs_register *vs)
{
	ctrlr->cap = *cap;
	ctrlr->vs = *vs;

	ctrlr->min_page_size = 1u << (12 + ctrlr->cap.bits.mpsmin);

	/* For now, always select page_size == min_page_size. */
	ctrlr->page_size = ctrlr->min_page_size;

	ctrlr->opts.io_queue_size = spdk_max(ctrlr->opts.io_queue_size, SPDK_NVME_IO_QUEUE_MIN_ENTRIES);
	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, MAX_IO_QUEUE_ENTRIES);
	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);

	ctrlr->opts.io_queue_requests = spdk_max(ctrlr->opts.io_queue_requests, ctrlr->opts.io_queue_size);
}

void
nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr)
{
	pthread_mutex_destroy(&ctrlr->ctrlr_lock);
}

void
nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_qpair *qpair, *tmp;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Prepare to destruct SSD: %s\n", ctrlr->trid.traddr);
	TAILQ_FOREACH_SAFE(qpair, &ctrlr->active_io_qpairs, tailq, tmp) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	nvme_ctrlr_free_doorbell_buffer(ctrlr);

	nvme_ctrlr_shutdown(ctrlr);

	nvme_ctrlr_destruct_namespaces(ctrlr);

	spdk_bit_array_free(&ctrlr->free_io_qids);

	nvme_transport_ctrlr_destruct(ctrlr);
}

int
nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
				struct nvme_request *req)
{
	return nvme_qpair_submit_request(ctrlr->adminq, req);
}

static void
nvme_keep_alive_completion(void *cb_ctx, const struct spdk_nvme_cpl *cpl)
{
	/* Do nothing */
}

/*
 * Check if we need to send a Keep Alive command.
 * Caller must hold ctrlr->ctrlr_lock.
 */
static void
nvme_ctrlr_keep_alive(struct spdk_nvme_ctrlr *ctrlr)
{
	uint64_t now;
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	now = spdk_get_ticks();
	if (now < ctrlr->next_keep_alive_tick) {
		return;
	}

	req = nvme_allocate_request_null(ctrlr->adminq, nvme_keep_alive_completion, NULL);
	if (req == NULL) {
		return;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_KEEP_ALIVE;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	if (rc != 0) {
		SPDK_ERRLOG("Submitting Keep Alive failed\n");
	}

	ctrlr->next_keep_alive_tick = now + ctrlr->keep_alive_interval_ticks;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	int32_t num_completions;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	if (ctrlr->keep_alive_interval_ticks) {
		nvme_ctrlr_keep_alive(ctrlr);
	}
	num_completions = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return num_completions;
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

union spdk_nvme_csts_register spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
		csts.raw = 0xFFFFFFFFu;
	}
	return csts;
}

union spdk_nvme_cap_register spdk_nvme_ctrlr_get_regs_cap(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->cap;
}

union spdk_nvme_vs_register spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->vs;
}
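/*
 * Illustrative sketch (not part of the driver): an application-level admin poller.
 * Calling spdk_nvme_ctrlr_process_admin_completions() periodically both reaps admin
 * completions (for example AER notifications) and, as shown above, sends Keep Alive
 * when keep_alive_interval_ticks is non-zero. The poller shape below is a
 * hypothetical application callback, not an SPDK API.
 */
static int
example_admin_poller(void *arg)
{
	struct spdk_nvme_ctrlr *ctrlr = arg;
	int32_t rc;

	rc = spdk_nvme_ctrlr_process_admin_completions(ctrlr);
	if (rc < 0) {
		SPDK_ERRLOG("admin queue polling failed\n");
	}

	return rc;
}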
union spdk_nvme_cmbsz_register spdk_nvme_ctrlr_get_regs_cmbsz(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_cmbsz_register cmbsz;

	if (nvme_ctrlr_get_cmbsz(ctrlr, &cmbsz)) {
		cmbsz.raw = 0;
	}

	return cmbsz;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

static int32_t
spdk_nvme_ctrlr_active_ns_idx(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	int32_t result = -1;

	if (ctrlr->active_ns_list == NULL || nsid == 0 || nsid > ctrlr->num_ns) {
		return result;
	}

	int32_t lower = 0;
	int32_t upper = ctrlr->num_ns - 1;
	int32_t mid;

	while (lower <= upper) {
		mid = lower + (upper - lower) / 2;
		if (ctrlr->active_ns_list[mid] == nsid) {
			result = mid;
			break;
		} else {
			if (ctrlr->active_ns_list[mid] != 0 && ctrlr->active_ns_list[mid] < nsid) {
				lower = mid + 1;
			} else {
				upper = mid - 1;
			}
		}
	}

	return result;
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	return spdk_nvme_ctrlr_active_ns_idx(ctrlr, nsid) != -1;
}

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->active_ns_list ? ctrlr->active_ns_list[0] : 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
{
	int32_t nsid_idx = spdk_nvme_ctrlr_active_ns_idx(ctrlr, prev_nsid);

	if (ctrlr->active_ns_list && nsid_idx >= 0 && (uint32_t)nsid_idx < ctrlr->num_ns - 1) {
		return ctrlr->active_ns_list[nsid_idx + 1];
	}
	return 0;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

struct spdk_pci_device *
spdk_nvme_ctrlr_get_pci_device(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr == NULL) {
		return NULL;
	}

	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
		return NULL;
	}

	return nvme_ctrlr_proc_get_devhandle(ctrlr);
}

uint32_t
spdk_nvme_ctrlr_get_max_xfer_size(const struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->max_xfer_size;
}

void
spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
				      spdk_nvme_aer_cb aer_cb_fn,
				      void *aer_cb_arg)
{
	struct spdk_nvme_ctrlr_process *active_proc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
	if (active_proc) {
		active_proc->aer_cb_fn = aer_cb_fn;
		active_proc->aer_cb_arg = aer_cb_arg;
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
}

void
spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_ctrlr_process *active_proc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
	if (active_proc) {
		active_proc->timeout_ticks = timeout_us * spdk_get_ticks_hz() / 1000000ULL;
		active_proc->timeout_cb_fn = cb_fn;
		active_proc->timeout_cb_arg = cb_arg;
	}

	ctrlr->timeout_enabled = true;

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
}
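/*
 * Illustrative sketch (not part of the driver): iterating only the active namespaces
 * using the first/next helpers above, which walk the cached active_ns_list rather
 * than probing every possible nsid up to ctrlr->num_ns. The function name is
 * hypothetical.
 */
static void
example_for_each_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;
	struct spdk_nvme_ns *ns;

	for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr); nsid != 0;
	     nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
		ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
		if (ns == NULL) {
			continue;
		}

		/* ... inspect the namespace, e.g. its size or block format ... */
	}
}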
bool
spdk_nvme_ctrlr_is_log_page_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page)
{
	/* No bounds check necessary, since log_page is uint8_t and log_page_supported has 256 entries */
	SPDK_STATIC_ASSERT(sizeof(ctrlr->log_page_supported) == 256, "log_page_supported size mismatch");
	return ctrlr->log_page_supported[log_page];
}

bool
spdk_nvme_ctrlr_is_feature_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature_code)
{
	/* No bounds check necessary, since feature_code is uint8_t and feature_supported has 256 entries */
	SPDK_STATIC_ASSERT(sizeof(ctrlr->feature_supported) == 256, "feature_supported size mismatch");
	return ctrlr->feature_supported[feature_code];
}

int
spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			  struct spdk_nvme_ctrlr_list *payload)
{
	struct nvme_completion_poll_status status;
	int res;
	struct spdk_nvme_ns *ns;

	res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload,
				       nvme_completion_poll_cb, &status);
	if (res) {
		return res;
	}
	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_attach_ns failed!\n");
		return -ENXIO;
	}

	res = nvme_ctrlr_identify_active_ns(ctrlr);
	if (res) {
		return res;
	}

	ns = &ctrlr->ns[nsid - 1];
	return nvme_ns_construct(ns, nsid, ctrlr);
}

int
spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			  struct spdk_nvme_ctrlr_list *payload)
{
	struct nvme_completion_poll_status status;
	int res;
	struct spdk_nvme_ns *ns;

	res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload,
				       nvme_completion_poll_cb, &status);
	if (res) {
		return res;
	}
	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_detach_ns failed!\n");
		return -ENXIO;
	}

	res = nvme_ctrlr_identify_active_ns(ctrlr);
	if (res) {
		return res;
	}

	ns = &ctrlr->ns[nsid - 1];
	/* Inactive NS */
	nvme_ns_destruct(ns);

	return 0;
}

uint32_t
spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload)
{
	struct nvme_completion_poll_status status;
	int res;
	uint32_t nsid;
	struct spdk_nvme_ns *ns;

	res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, &status);
	if (res) {
		return 0;
	}
	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_create_ns failed!\n");
		return 0;
	}

	nsid = status.cpl.cdw0;
	ns = &ctrlr->ns[nsid - 1];
	/* Inactive NS */
	res = nvme_ns_construct(ns, nsid, ctrlr);
	if (res) {
		return 0;
	}

	/* Return the namespace ID that was created */
	return nsid;
}
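/*
 * Illustrative sketch (not part of the driver): a typical create-then-attach flow
 * built on the management helpers above. The namespace is created unattached, then
 * attached to this controller by its cntlid. The spdk_nvme_ctrlr_list field names
 * and the cntlid lookup via spdk_nvme_ctrlr_get_data() are assumptions based on the
 * public Identify data structures; the function name is hypothetical.
 */
static int
example_create_and_attach_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *ns_data)
{
	struct spdk_nvme_ctrlr_list ctrlr_list = {};
	uint32_t nsid;

	nsid = spdk_nvme_ctrlr_create_ns(ctrlr, ns_data);
	if (nsid == 0) {
		return -EIO;
	}

	ctrlr_list.ctrlr_count = 1;
	ctrlr_list.ctrlr[0] = spdk_nvme_ctrlr_get_data(ctrlr)->cntlid;

	return spdk_nvme_ctrlr_attach_ns(ctrlr, nsid, &ctrlr_list);
}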
int
spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	struct nvme_completion_poll_status status;
	int res;
	struct spdk_nvme_ns *ns;

	res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, &status);
	if (res) {
		return res;
	}
	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_delete_ns failed!\n");
		return -ENXIO;
	}

	res = nvme_ctrlr_identify_active_ns(ctrlr);
	if (res) {
		return res;
	}

	ns = &ctrlr->ns[nsid - 1];
	nvme_ns_destruct(ns);

	return 0;
}

int
spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		       struct spdk_nvme_format *format)
{
	struct nvme_completion_poll_status status;
	int res;

	res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb,
				    &status);
	if (res) {
		return res;
	}
	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_format failed!\n");
		return -ENXIO;
	}

	return spdk_nvme_ctrlr_reset(ctrlr);
}

int
spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, uint32_t size,
				int slot, enum spdk_nvme_fw_commit_action commit_action,
				struct spdk_nvme_status *completion_status)
{
	struct spdk_nvme_fw_commit fw_commit;
	struct nvme_completion_poll_status status;
	int res;
	unsigned int size_remaining;
	unsigned int offset;
	unsigned int transfer;
	void *p;

	if (!completion_status) {
		return -EINVAL;
	}
	memset(completion_status, 0, sizeof(struct spdk_nvme_status));
	if (size % 4) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_update_firmware invalid size!\n");
		return -1;
	}

	/* Only SPDK_NVME_FW_COMMIT_REPLACE_IMG and
	 * SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG are currently supported.
	 */
	if ((commit_action != SPDK_NVME_FW_COMMIT_REPLACE_IMG) &&
	    (commit_action != SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_update_firmware invalid command!\n");
		return -1;
	}

	/* Firmware download */
	size_remaining = size;
	offset = 0;
	p = payload;

	while (size_remaining > 0) {
		transfer = spdk_min(size_remaining, ctrlr->min_page_size);

		res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
						       nvme_completion_poll_cb,
						       &status);
		if (res) {
			return res;
		}

		if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
			SPDK_ERRLOG("spdk_nvme_ctrlr_fw_image_download failed!\n");
			return -ENXIO;
		}
		p += transfer;
		offset += transfer;
		size_remaining -= transfer;
	}

	/* Firmware commit */
	memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
	fw_commit.fs = slot;
	fw_commit.ca = commit_action;

	res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb,
				       &status);
	if (res) {
		return res;
	}

	res = spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock);

	memcpy(completion_status, &status.cpl.status, sizeof(struct spdk_nvme_status));

	if (res) {
		if (status.cpl.status.sct != SPDK_NVME_SCT_COMMAND_SPECIFIC ||
		    status.cpl.status.sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) {
			if (status.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
			    status.cpl.status.sc == SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET) {
				SPDK_NOTICELOG("firmware activation requires a conventional reset to be performed!\n");
			} else {
				SPDK_ERRLOG("nvme_ctrlr_cmd_fw_commit failed!\n");
			}
			return -ENXIO;
		}
	}

	return spdk_nvme_ctrlr_reset(ctrlr);
}
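/*
 * Illustrative sketch (not part of the driver): invoking the firmware update helper
 * above. The image size is assumed to be a multiple of 4 bytes (enforced above);
 * the returned completion status lets the caller distinguish "needs a reset" cases
 * from hard failures. The function name is hypothetical.
 */
static int
example_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *image, uint32_t image_size, int slot)
{
	struct spdk_nvme_status fw_status;
	int rc;

	rc = spdk_nvme_ctrlr_update_firmware(ctrlr, image, image_size, slot,
					     SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG,
					     &fw_status);
	if (rc != 0) {
		SPDK_ERRLOG("firmware update failed: sct %d, sc %d\n",
			    fw_status.sct, fw_status.sc);
	}

	return rc;
}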
void *
spdk_nvme_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
{
	void *buf;

	if (size == 0) {
		return NULL;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	buf = nvme_transport_ctrlr_alloc_cmb_io_buffer(ctrlr, size);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return buf;
}

void
spdk_nvme_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
{
	if (buf && size) {
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
		nvme_transport_ctrlr_free_cmb_io_buffer(ctrlr, buf, size);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}
}

bool
spdk_nvme_ctrlr_is_discovery(struct spdk_nvme_ctrlr *ctrlr)
{
	assert(ctrlr);

	return !strncmp(ctrlr->trid.subnqn, SPDK_NVMF_DISCOVERY_NQN,
			strlen(SPDK_NVMF_DISCOVERY_NQN));
}

int
spdk_nvme_ctrlr_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				 uint16_t spsp, uint8_t nssf, void *payload, size_t size)
{
	struct nvme_completion_poll_status status;
	int res;

	res = nvme_ctrlr_cmd_security_receive(ctrlr, secp, spsp, nssf, payload, size,
					      nvme_completion_poll_cb, &status);
	if (res) {
		return res;
	}
	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_security_receive failed!\n");
		return -ENXIO;
	}

	return 0;
}

int
spdk_nvme_ctrlr_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
			      uint16_t spsp, uint8_t nssf, void *payload, size_t size)
{
	struct nvme_completion_poll_status status;
	int res;

	res = nvme_ctrlr_cmd_security_send(ctrlr, secp, spsp, nssf, payload, size,
					   nvme_completion_poll_cb, &status);
	if (res) {
		return res;
	}
	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_security_send failed!\n");
		return -ENXIO;
	}

	return 0;
}
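/*
 * Illustrative sketch (not part of the driver): using the Security Receive helper
 * above to fetch the list of supported security protocols. Security protocol 0x00
 * with SPSP 0x0000 is the standard "supported security protocols" query; those
 * constant values and the function name are assumptions for illustration, and
 * parsing of the returned buffer is omitted.
 */
static int
example_list_security_protocols(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t buf_size)
{
	/* SECP 0x00 / SPSP 0x0000: request the supported security protocol list. */
	return spdk_nvme_ctrlr_security_receive(ctrlr, 0x00, 0x0000, 0, buf, buf_size);
}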