1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "nvme_internal.h" 37 38 #include "spdk/env.h" 39 40 static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr, 41 struct nvme_async_event_request *aer); 42 43 static int 44 nvme_ctrlr_get_cc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc) 45 { 46 return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw), 47 &cc->raw); 48 } 49 50 static int 51 nvme_ctrlr_get_csts(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts) 52 { 53 return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts.raw), 54 &csts->raw); 55 } 56 57 int 58 nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap) 59 { 60 return nvme_transport_ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw), 61 &cap->raw); 62 } 63 64 static int 65 nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs) 66 { 67 return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, vs.raw), 68 &vs->raw); 69 } 70 71 static int 72 nvme_ctrlr_set_cc(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cc_register *cc) 73 { 74 return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw), 75 cc->raw); 76 } 77 78 void 79 spdk_nvme_ctrlr_opts_set_defaults(struct spdk_nvme_ctrlr_opts *opts) 80 { 81 opts->num_io_queues = DEFAULT_MAX_IO_QUEUES; 82 opts->use_cmb_sqs = true; 83 opts->arb_mechanism = SPDK_NVME_CC_AMS_RR; 84 opts->keep_alive_timeout_ms = 10 * 1000; 85 opts->io_queue_size = DEFAULT_IO_QUEUE_SIZE; 86 strncpy(opts->hostnqn, DEFAULT_HOSTNQN, sizeof(opts->hostnqn)); 87 opts->io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS; 88 } 89 90 /** 91 * This function will be called when the process allocates the IO qpair. 92 * Note: the ctrlr_lock must be held when calling this function. 93 */ 94 static void 95 nvme_ctrlr_proc_add_io_qpair(struct spdk_nvme_qpair *qpair) 96 { 97 struct spdk_nvme_ctrlr_process *active_proc; 98 struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr; 99 pid_t pid = getpid(); 100 101 TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) { 102 if (active_proc->pid == pid) { 103 TAILQ_INSERT_TAIL(&active_proc->allocated_io_qpairs, qpair, 104 per_process_tailq); 105 break; 106 } 107 } 108 } 109 110 /** 111 * This function will be called when the process frees the IO qpair. 112 * Note: the ctrlr_lock must be held when calling this function. 113 */ 114 static void 115 nvme_ctrlr_proc_remove_io_qpair(struct spdk_nvme_qpair *qpair) 116 { 117 struct spdk_nvme_ctrlr_process *active_proc; 118 struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr; 119 struct spdk_nvme_qpair *active_qpair, *tmp_qpair; 120 pid_t pid = getpid(); 121 bool proc_found = false; 122 123 TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) { 124 if (active_proc->pid == pid) { 125 proc_found = true; 126 break; 127 } 128 } 129 130 if (proc_found == false) { 131 return; 132 } 133 134 TAILQ_FOREACH_SAFE(active_qpair, &active_proc->allocated_io_qpairs, 135 per_process_tailq, tmp_qpair) { 136 if (active_qpair == qpair) { 137 TAILQ_REMOVE(&active_proc->allocated_io_qpairs, 138 active_qpair, per_process_tailq); 139 140 break; 141 } 142 } 143 } 144 145 void 146 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr, 147 struct spdk_nvme_io_qpair_opts *opts, 148 size_t opts_size) 149 { 150 if (!ctrlr || !opts) { 151 return; 152 } 153 154 memset(opts, 0, opts_size); 155 156 #define FIELD_OK(field) \ 157 offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(opts->field) <= opts_size 158 159 if (FIELD_OK(qprio)) { 160 opts->qprio = SPDK_NVME_QPRIO_URGENT; 161 } 162 163 if (FIELD_OK(io_queue_size)) { 164 opts->io_queue_size = ctrlr->opts.io_queue_size; 165 } 166 167 if (FIELD_OK(io_queue_requests)) { 168 opts->io_queue_requests = ctrlr->opts.io_queue_requests; 169 } 170 171 #undef FIELD_OK 172 } 173 174 struct spdk_nvme_qpair * 175 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 176 const struct spdk_nvme_io_qpair_opts *user_opts, 177 size_t opts_size) 178 { 179 uint32_t qid; 180 struct spdk_nvme_qpair *qpair; 181 union spdk_nvme_cc_register cc; 182 struct spdk_nvme_io_qpair_opts opts; 183 184 if (!ctrlr) { 185 return NULL; 186 } 187 188 /* 189 * Get the default options, then overwrite them with the user-provided options 190 * up to opts_size. 191 * 192 * This allows for extensions of the opts structure without breaking 193 * ABI compatibility. 194 */ 195 spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts)); 196 if (user_opts) { 197 memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size)); 198 } 199 200 if (nvme_ctrlr_get_cc(ctrlr, &cc)) { 201 SPDK_ERRLOG("get_cc failed\n"); 202 return NULL; 203 } 204 205 /* Only the low 2 bits (values 0, 1, 2, 3) of QPRIO are valid. */ 206 if ((opts.qprio & 3) != opts.qprio) { 207 return NULL; 208 } 209 210 /* 211 * Only value SPDK_NVME_QPRIO_URGENT(0) is valid for the 212 * default round robin arbitration method. 213 */ 214 if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts.qprio != SPDK_NVME_QPRIO_URGENT)) { 215 SPDK_ERRLOG("invalid queue priority for default round robin arbitration method\n"); 216 return NULL; 217 } 218 219 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 220 221 /* 222 * Get the first available I/O queue ID. 223 */ 224 qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1); 225 if (qid > ctrlr->opts.num_io_queues) { 226 SPDK_ERRLOG("No free I/O queue IDs\n"); 227 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 228 return NULL; 229 } 230 231 qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, &opts); 232 if (qpair == NULL) { 233 SPDK_ERRLOG("nvme_transport_ctrlr_create_io_qpair() failed\n"); 234 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 235 return NULL; 236 } 237 spdk_bit_array_clear(ctrlr->free_io_qids, qid); 238 TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq); 239 240 nvme_ctrlr_proc_add_io_qpair(qpair); 241 242 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 243 244 if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC) { 245 spdk_delay_us(100); 246 } 247 248 return qpair; 249 } 250 251 int 252 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair) 253 { 254 struct spdk_nvme_ctrlr *ctrlr; 255 void *req_buf; 256 257 if (qpair == NULL) { 258 return 0; 259 } 260 261 ctrlr = qpair->ctrlr; 262 263 if (qpair->in_completion_context) { 264 /* 265 * There are many cases where it is convenient to delete an io qpair in the context 266 * of that qpair's completion routine. To handle this properly, set a flag here 267 * so that the completion routine will perform an actual delete after the context 268 * unwinds. 269 */ 270 qpair->delete_after_completion_context = 1; 271 return 0; 272 } 273 274 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 275 276 nvme_ctrlr_proc_remove_io_qpair(qpair); 277 278 TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq); 279 spdk_bit_array_set(ctrlr->free_io_qids, qpair->id); 280 281 req_buf = qpair->req_buf; 282 283 if (nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair)) { 284 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 285 return -1; 286 } 287 288 spdk_dma_free(req_buf); 289 290 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 291 return 0; 292 } 293 294 static void 295 nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr, 296 struct spdk_nvme_intel_log_page_directory *log_page_directory) 297 { 298 if (log_page_directory == NULL) { 299 return; 300 } 301 302 if (ctrlr->cdata.vid != SPDK_PCI_VID_INTEL) { 303 return; 304 } 305 306 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY] = true; 307 308 if (log_page_directory->read_latency_log_len || 309 (ctrlr->quirks & NVME_INTEL_QUIRK_READ_LATENCY)) { 310 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] = true; 311 } 312 if (log_page_directory->write_latency_log_len || 313 (ctrlr->quirks & NVME_INTEL_QUIRK_WRITE_LATENCY)) { 314 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] = true; 315 } 316 if (log_page_directory->temperature_statistics_log_len) { 317 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] = true; 318 } 319 if (log_page_directory->smart_log_len) { 320 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_SMART] = true; 321 } 322 if (log_page_directory->marketing_description_log_len) { 323 ctrlr->log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] = true; 324 } 325 } 326 327 static int nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr) 328 { 329 uint64_t phys_addr = 0; 330 struct nvme_completion_poll_status status; 331 struct spdk_nvme_intel_log_page_directory *log_page_directory; 332 333 log_page_directory = spdk_dma_zmalloc(sizeof(struct spdk_nvme_intel_log_page_directory), 334 64, &phys_addr); 335 if (log_page_directory == NULL) { 336 SPDK_ERRLOG("could not allocate log_page_directory\n"); 337 return -ENXIO; 338 } 339 340 status.done = false; 341 spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY, SPDK_NVME_GLOBAL_NS_TAG, 342 log_page_directory, sizeof(struct spdk_nvme_intel_log_page_directory), 0, 343 nvme_completion_poll_cb, 344 &status); 345 while (status.done == false) { 346 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0); 347 } 348 if (spdk_nvme_cpl_is_error(&status.cpl)) { 349 spdk_dma_free(log_page_directory); 350 SPDK_ERRLOG("nvme_ctrlr_cmd_get_log_page failed!\n"); 351 return -ENXIO; 352 } 353 354 nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, log_page_directory); 355 spdk_dma_free(log_page_directory); 356 return 0; 357 } 358 359 static void 360 nvme_ctrlr_set_supported_log_pages(struct spdk_nvme_ctrlr *ctrlr) 361 { 362 memset(ctrlr->log_page_supported, 0, sizeof(ctrlr->log_page_supported)); 363 /* Mandatory pages */ 364 ctrlr->log_page_supported[SPDK_NVME_LOG_ERROR] = true; 365 ctrlr->log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] = true; 366 ctrlr->log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] = true; 367 if (ctrlr->cdata.lpa.celp) { 368 ctrlr->log_page_supported[SPDK_NVME_LOG_COMMAND_EFFECTS_LOG] = true; 369 } 370 if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) { 371 nvme_ctrlr_set_intel_support_log_pages(ctrlr); 372 } 373 } 374 375 static void 376 nvme_ctrlr_set_intel_supported_features(struct spdk_nvme_ctrlr *ctrlr) 377 { 378 ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_MAX_LBA] = true; 379 ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_NATIVE_MAX_LBA] = true; 380 ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_POWER_GOVERNOR_SETTING] = true; 381 ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_SMBUS_ADDRESS] = true; 382 ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LED_PATTERN] = true; 383 ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_RESET_TIMED_WORKLOAD_COUNTERS] = true; 384 ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING] = true; 385 } 386 387 static void 388 nvme_ctrlr_set_supported_features(struct spdk_nvme_ctrlr *ctrlr) 389 { 390 memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported)); 391 /* Mandatory features */ 392 ctrlr->feature_supported[SPDK_NVME_FEAT_ARBITRATION] = true; 393 ctrlr->feature_supported[SPDK_NVME_FEAT_POWER_MANAGEMENT] = true; 394 ctrlr->feature_supported[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD] = true; 395 ctrlr->feature_supported[SPDK_NVME_FEAT_ERROR_RECOVERY] = true; 396 ctrlr->feature_supported[SPDK_NVME_FEAT_NUMBER_OF_QUEUES] = true; 397 ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_COALESCING] = true; 398 ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION] = true; 399 ctrlr->feature_supported[SPDK_NVME_FEAT_WRITE_ATOMICITY] = true; 400 ctrlr->feature_supported[SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION] = true; 401 /* Optional features */ 402 if (ctrlr->cdata.vwc.present) { 403 ctrlr->feature_supported[SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE] = true; 404 } 405 if (ctrlr->cdata.apsta.supported) { 406 ctrlr->feature_supported[SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION] = true; 407 } 408 if (ctrlr->cdata.hmpre) { 409 ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_MEM_BUFFER] = true; 410 } 411 if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) { 412 nvme_ctrlr_set_intel_supported_features(ctrlr); 413 } 414 } 415 416 void 417 nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove) 418 { 419 /* 420 * Set the flag here and leave the work failure of qpairs to 421 * spdk_nvme_qpair_process_completions(). 422 */ 423 if (hot_remove) { 424 ctrlr->is_removed = true; 425 } 426 ctrlr->is_failed = true; 427 } 428 429 static void 430 nvme_ctrlr_shutdown(struct spdk_nvme_ctrlr *ctrlr) 431 { 432 union spdk_nvme_cc_register cc; 433 union spdk_nvme_csts_register csts; 434 int ms_waited = 0; 435 436 if (ctrlr->is_removed) { 437 return; 438 } 439 440 if (nvme_ctrlr_get_cc(ctrlr, &cc)) { 441 SPDK_ERRLOG("get_cc() failed\n"); 442 return; 443 } 444 445 cc.bits.shn = SPDK_NVME_SHN_NORMAL; 446 447 if (nvme_ctrlr_set_cc(ctrlr, &cc)) { 448 SPDK_ERRLOG("set_cc() failed\n"); 449 return; 450 } 451 452 /* 453 * The NVMe spec does not define a timeout period 454 * for shutdown notification, so we just pick 455 * 5 seconds as a reasonable amount of time to 456 * wait before proceeding. 457 */ 458 do { 459 if (nvme_ctrlr_get_csts(ctrlr, &csts)) { 460 SPDK_ERRLOG("get_csts() failed\n"); 461 return; 462 } 463 464 if (csts.bits.shst == SPDK_NVME_SHST_COMPLETE) { 465 SPDK_TRACELOG(SPDK_TRACE_NVME, "shutdown complete\n"); 466 return; 467 } 468 469 nvme_delay(1000); 470 ms_waited++; 471 } while (ms_waited < 5000); 472 473 SPDK_ERRLOG("did not shutdown within 5 seconds\n"); 474 } 475 476 static int 477 nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr) 478 { 479 union spdk_nvme_cc_register cc; 480 int rc; 481 482 rc = nvme_transport_ctrlr_enable(ctrlr); 483 if (rc != 0) { 484 SPDK_ERRLOG("transport ctrlr_enable failed\n"); 485 return rc; 486 } 487 488 if (nvme_ctrlr_get_cc(ctrlr, &cc)) { 489 SPDK_ERRLOG("get_cc() failed\n"); 490 return -EIO; 491 } 492 493 if (cc.bits.en != 0) { 494 SPDK_ERRLOG("%s called with CC.EN = 1\n", __func__); 495 return -EINVAL; 496 } 497 498 cc.bits.en = 1; 499 cc.bits.css = 0; 500 cc.bits.shn = 0; 501 cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */ 502 cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */ 503 504 /* Page size is 2 ^ (12 + mps). */ 505 cc.bits.mps = spdk_u32log2(ctrlr->page_size) - 12; 506 507 switch (ctrlr->opts.arb_mechanism) { 508 case SPDK_NVME_CC_AMS_RR: 509 break; 510 case SPDK_NVME_CC_AMS_WRR: 511 if (SPDK_NVME_CAP_AMS_WRR & ctrlr->cap.bits.ams) { 512 break; 513 } 514 return -EINVAL; 515 case SPDK_NVME_CC_AMS_VS: 516 if (SPDK_NVME_CAP_AMS_VS & ctrlr->cap.bits.ams) { 517 break; 518 } 519 return -EINVAL; 520 default: 521 return -EINVAL; 522 } 523 524 cc.bits.ams = ctrlr->opts.arb_mechanism; 525 526 if (nvme_ctrlr_set_cc(ctrlr, &cc)) { 527 SPDK_ERRLOG("set_cc() failed\n"); 528 return -EIO; 529 } 530 531 return 0; 532 } 533 534 #ifdef DEBUG 535 static const char * 536 nvme_ctrlr_state_string(enum nvme_ctrlr_state state) 537 { 538 switch (state) { 539 case NVME_CTRLR_STATE_INIT: 540 return "init"; 541 case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1: 542 return "disable and wait for CSTS.RDY = 1"; 543 case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0: 544 return "disable and wait for CSTS.RDY = 0"; 545 case NVME_CTRLR_STATE_ENABLE: 546 return "enable controller by writing CC.EN = 1"; 547 case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1: 548 return "wait for CSTS.RDY = 1"; 549 case NVME_CTRLR_STATE_READY: 550 return "ready"; 551 } 552 return "unknown"; 553 }; 554 #endif /* DEBUG */ 555 556 static void 557 nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state, 558 uint64_t timeout_in_ms) 559 { 560 ctrlr->state = state; 561 if (timeout_in_ms == NVME_TIMEOUT_INFINITE) { 562 SPDK_TRACELOG(SPDK_TRACE_NVME, "setting state to %s (no timeout)\n", 563 nvme_ctrlr_state_string(ctrlr->state)); 564 ctrlr->state_timeout_tsc = NVME_TIMEOUT_INFINITE; 565 } else { 566 SPDK_TRACELOG(SPDK_TRACE_NVME, "setting state to %s (timeout %" PRIu64 " ms)\n", 567 nvme_ctrlr_state_string(ctrlr->state), timeout_in_ms); 568 ctrlr->state_timeout_tsc = spdk_get_ticks() + (timeout_in_ms * spdk_get_ticks_hz()) / 1000; 569 } 570 } 571 572 int 573 spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr) 574 { 575 int rc = 0; 576 struct spdk_nvme_qpair *qpair; 577 struct nvme_request *req, *tmp; 578 579 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 580 581 if (ctrlr->is_resetting || ctrlr->is_failed) { 582 /* 583 * Controller is already resetting or has failed. Return 584 * immediately since there is no need to kick off another 585 * reset in these cases. 586 */ 587 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 588 return 0; 589 } 590 591 ctrlr->is_resetting = true; 592 593 SPDK_NOTICELOG("resetting controller\n"); 594 595 /* Free all of the queued abort requests */ 596 STAILQ_FOREACH_SAFE(req, &ctrlr->queued_aborts, stailq, tmp) { 597 STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq); 598 nvme_free_request(req); 599 ctrlr->outstanding_aborts--; 600 } 601 602 /* Disable all queues before disabling the controller hardware. */ 603 nvme_qpair_disable(ctrlr->adminq); 604 TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) { 605 nvme_qpair_disable(qpair); 606 } 607 608 /* Set the state back to INIT to cause a full hardware reset. */ 609 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE); 610 611 while (ctrlr->state != NVME_CTRLR_STATE_READY) { 612 if (nvme_ctrlr_process_init(ctrlr) != 0) { 613 SPDK_ERRLOG("%s: controller reinitialization failed\n", __func__); 614 nvme_ctrlr_fail(ctrlr, false); 615 rc = -1; 616 break; 617 } 618 } 619 620 if (!ctrlr->is_failed) { 621 /* Reinitialize qpairs */ 622 TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) { 623 if (nvme_transport_ctrlr_reinit_io_qpair(ctrlr, qpair) != 0) { 624 nvme_ctrlr_fail(ctrlr, false); 625 rc = -1; 626 } 627 } 628 } 629 630 ctrlr->is_resetting = false; 631 632 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 633 634 return rc; 635 } 636 637 static int 638 nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr) 639 { 640 struct nvme_completion_poll_status status; 641 int rc; 642 643 status.done = false; 644 rc = nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata, 645 nvme_completion_poll_cb, &status); 646 if (rc != 0) { 647 return rc; 648 } 649 650 while (status.done == false) { 651 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0); 652 } 653 if (spdk_nvme_cpl_is_error(&status.cpl)) { 654 SPDK_ERRLOG("nvme_identify_controller failed!\n"); 655 return -ENXIO; 656 } 657 658 /* 659 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the 660 * controller supports. 661 */ 662 ctrlr->max_xfer_size = nvme_transport_ctrlr_get_max_xfer_size(ctrlr); 663 SPDK_TRACELOG(SPDK_TRACE_NVME, "transport max_xfer_size %u\n", ctrlr->max_xfer_size); 664 if (ctrlr->cdata.mdts > 0) { 665 ctrlr->max_xfer_size = spdk_min(ctrlr->max_xfer_size, 666 ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts))); 667 SPDK_TRACELOG(SPDK_TRACE_NVME, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size); 668 } 669 670 return 0; 671 } 672 673 static int 674 nvme_ctrlr_set_num_qpairs(struct spdk_nvme_ctrlr *ctrlr) 675 { 676 struct nvme_completion_poll_status status; 677 int cq_allocated, sq_allocated; 678 int rc; 679 uint32_t i; 680 681 status.done = false; 682 683 if (ctrlr->opts.num_io_queues > SPDK_NVME_MAX_IO_QUEUES) { 684 SPDK_NOTICELOG("Limiting requested num_io_queues %u to max %d\n", 685 ctrlr->opts.num_io_queues, SPDK_NVME_MAX_IO_QUEUES); 686 ctrlr->opts.num_io_queues = SPDK_NVME_MAX_IO_QUEUES; 687 } else if (ctrlr->opts.num_io_queues < 1) { 688 SPDK_NOTICELOG("Requested num_io_queues 0, increasing to 1\n"); 689 ctrlr->opts.num_io_queues = 1; 690 } 691 692 rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->opts.num_io_queues, 693 nvme_completion_poll_cb, &status); 694 if (rc != 0) { 695 return rc; 696 } 697 698 while (status.done == false) { 699 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0); 700 } 701 if (spdk_nvme_cpl_is_error(&status.cpl)) { 702 SPDK_ERRLOG("nvme_set_num_queues failed!\n"); 703 return -ENXIO; 704 } 705 706 /* 707 * Data in cdw0 is 0-based. 708 * Lower 16-bits indicate number of submission queues allocated. 709 * Upper 16-bits indicate number of completion queues allocated. 710 */ 711 sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1; 712 cq_allocated = (status.cpl.cdw0 >> 16) + 1; 713 714 ctrlr->opts.num_io_queues = spdk_min(sq_allocated, cq_allocated); 715 716 ctrlr->free_io_qids = spdk_bit_array_create(ctrlr->opts.num_io_queues + 1); 717 if (ctrlr->free_io_qids == NULL) { 718 return -ENOMEM; 719 } 720 721 /* Initialize list of free I/O queue IDs. QID 0 is the admin queue. */ 722 spdk_bit_array_clear(ctrlr->free_io_qids, 0); 723 for (i = 1; i <= ctrlr->opts.num_io_queues; i++) { 724 spdk_bit_array_set(ctrlr->free_io_qids, i); 725 } 726 727 return 0; 728 } 729 730 static int 731 nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr) 732 { 733 struct nvme_completion_poll_status status; 734 uint32_t keep_alive_interval_ms; 735 int rc; 736 737 if (ctrlr->opts.keep_alive_timeout_ms == 0) { 738 return 0; 739 } 740 741 if (ctrlr->cdata.kas == 0) { 742 SPDK_TRACELOG(SPDK_TRACE_NVME, "Controller KAS is 0 - not enabling Keep Alive\n"); 743 ctrlr->opts.keep_alive_timeout_ms = 0; 744 return 0; 745 } 746 747 /* Retrieve actual keep alive timeout, since the controller may have adjusted it. */ 748 status.done = false; 749 rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0, 750 nvme_completion_poll_cb, &status); 751 if (rc != 0) { 752 SPDK_ERRLOG("Keep alive timeout Get Feature failed: %d\n", rc); 753 ctrlr->opts.keep_alive_timeout_ms = 0; 754 return rc; 755 } 756 757 while (status.done == false) { 758 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0); 759 } 760 if (spdk_nvme_cpl_is_error(&status.cpl)) { 761 SPDK_ERRLOG("Keep alive timeout Get Feature failed: SC %x SCT %x\n", 762 status.cpl.status.sc, status.cpl.status.sct); 763 ctrlr->opts.keep_alive_timeout_ms = 0; 764 return -ENXIO; 765 } 766 767 if (ctrlr->opts.keep_alive_timeout_ms != status.cpl.cdw0) { 768 SPDK_TRACELOG(SPDK_TRACE_NVME, "Controller adjusted keep alive timeout to %u ms\n", 769 status.cpl.cdw0); 770 } 771 772 ctrlr->opts.keep_alive_timeout_ms = status.cpl.cdw0; 773 774 keep_alive_interval_ms = ctrlr->opts.keep_alive_timeout_ms / 2; 775 if (keep_alive_interval_ms == 0) { 776 keep_alive_interval_ms = 1; 777 } 778 SPDK_TRACELOG(SPDK_TRACE_NVME, "Sending keep alive every %u ms\n", keep_alive_interval_ms); 779 780 ctrlr->keep_alive_interval_ticks = (keep_alive_interval_ms * spdk_get_ticks_hz()) / UINT64_C(1000); 781 782 /* Schedule the first Keep Alive to be sent as soon as possible. */ 783 ctrlr->next_keep_alive_tick = spdk_get_ticks(); 784 785 return 0; 786 } 787 788 static void 789 nvme_ctrlr_destruct_namespaces(struct spdk_nvme_ctrlr *ctrlr) 790 { 791 if (ctrlr->ns) { 792 uint32_t i, num_ns = ctrlr->num_ns; 793 794 for (i = 0; i < num_ns; i++) { 795 nvme_ns_destruct(&ctrlr->ns[i]); 796 } 797 798 spdk_dma_free(ctrlr->ns); 799 ctrlr->ns = NULL; 800 ctrlr->num_ns = 0; 801 } 802 803 if (ctrlr->nsdata) { 804 spdk_dma_free(ctrlr->nsdata); 805 ctrlr->nsdata = NULL; 806 } 807 } 808 809 static int 810 nvme_ctrlr_construct_namespaces(struct spdk_nvme_ctrlr *ctrlr) 811 { 812 uint32_t i, nn = ctrlr->cdata.nn; 813 uint64_t phys_addr = 0; 814 815 if (nn == 0) { 816 SPDK_ERRLOG("controller has 0 namespaces\n"); 817 return -1; 818 } 819 820 /* ctrlr->num_ns may be 0 (startup) or a different number of namespaces (reset), 821 * so check if we need to reallocate. 822 */ 823 if (nn != ctrlr->num_ns) { 824 nvme_ctrlr_destruct_namespaces(ctrlr); 825 826 ctrlr->ns = spdk_dma_zmalloc(nn * sizeof(struct spdk_nvme_ns), 64, 827 &phys_addr); 828 if (ctrlr->ns == NULL) { 829 goto fail; 830 } 831 832 ctrlr->nsdata = spdk_dma_zmalloc(nn * sizeof(struct spdk_nvme_ns_data), 64, 833 &phys_addr); 834 if (ctrlr->nsdata == NULL) { 835 goto fail; 836 } 837 838 ctrlr->num_ns = nn; 839 } 840 841 for (i = 0; i < nn; i++) { 842 struct spdk_nvme_ns *ns = &ctrlr->ns[i]; 843 uint32_t nsid = i + 1; 844 845 if (nvme_ns_construct(ns, nsid, ctrlr) != 0) { 846 goto fail; 847 } 848 } 849 850 return 0; 851 852 fail: 853 nvme_ctrlr_destruct_namespaces(ctrlr); 854 return -1; 855 } 856 857 static void 858 nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl) 859 { 860 struct nvme_async_event_request *aer = arg; 861 struct spdk_nvme_ctrlr *ctrlr = aer->ctrlr; 862 863 if (cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) { 864 /* 865 * This is simulated when controller is being shut down, to 866 * effectively abort outstanding asynchronous event requests 867 * and make sure all memory is freed. Do not repost the 868 * request in this case. 869 */ 870 return; 871 } 872 873 if (ctrlr->aer_cb_fn != NULL) { 874 ctrlr->aer_cb_fn(ctrlr->aer_cb_arg, cpl); 875 } 876 877 /* 878 * Repost another asynchronous event request to replace the one 879 * that just completed. 880 */ 881 if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) { 882 /* 883 * We can't do anything to recover from a failure here, 884 * so just print a warning message and leave the AER unsubmitted. 885 */ 886 SPDK_ERRLOG("resubmitting AER failed!\n"); 887 } 888 } 889 890 static int 891 nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr, 892 struct nvme_async_event_request *aer) 893 { 894 struct nvme_request *req; 895 896 aer->ctrlr = ctrlr; 897 req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_async_event_cb, aer); 898 aer->req = req; 899 if (req == NULL) { 900 return -1; 901 } 902 903 req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 904 return nvme_ctrlr_submit_admin_request(ctrlr, req); 905 } 906 907 static int 908 nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr) 909 { 910 union spdk_nvme_critical_warning_state state; 911 struct nvme_async_event_request *aer; 912 uint32_t i; 913 struct nvme_completion_poll_status status; 914 int rc; 915 916 status.done = false; 917 918 state.raw = 0xFF; 919 state.bits.reserved = 0; 920 rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, state, nvme_completion_poll_cb, &status); 921 if (rc != 0) { 922 return rc; 923 } 924 925 while (status.done == false) { 926 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0); 927 } 928 if (spdk_nvme_cpl_is_error(&status.cpl)) { 929 SPDK_ERRLOG("nvme_ctrlr_cmd_set_async_event_config failed!\n"); 930 return 0; 931 } 932 933 /* aerl is a zero-based value, so we need to add 1 here. */ 934 ctrlr->num_aers = spdk_min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1)); 935 936 for (i = 0; i < ctrlr->num_aers; i++) { 937 aer = &ctrlr->aer[i]; 938 if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) { 939 SPDK_ERRLOG("nvme_ctrlr_construct_and_submit_aer failed!\n"); 940 return -1; 941 } 942 } 943 944 return 0; 945 } 946 947 /** 948 * This function will be called when a process is using the controller. 949 * 1. For the primary process, it is called when constructing the controller. 950 * 2. For the secondary process, it is called at probing the controller. 951 * Note: will check whether the process is already added for the same process. 952 */ 953 int 954 nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle) 955 { 956 struct spdk_nvme_ctrlr_process *ctrlr_proc, *active_proc; 957 pid_t pid = getpid(); 958 959 /* Check whether the process is already added or not */ 960 TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) { 961 if (active_proc->pid == pid) { 962 return 0; 963 } 964 } 965 966 /* Initialize the per process properties for this ctrlr */ 967 ctrlr_proc = spdk_dma_zmalloc(sizeof(struct spdk_nvme_ctrlr_process), 64, NULL); 968 if (ctrlr_proc == NULL) { 969 SPDK_ERRLOG("failed to allocate memory to track the process props\n"); 970 971 return -1; 972 } 973 974 ctrlr_proc->is_primary = spdk_process_is_primary(); 975 ctrlr_proc->pid = pid; 976 STAILQ_INIT(&ctrlr_proc->active_reqs); 977 ctrlr_proc->devhandle = devhandle; 978 ctrlr_proc->ref = 0; 979 TAILQ_INIT(&ctrlr_proc->allocated_io_qpairs); 980 981 TAILQ_INSERT_TAIL(&ctrlr->active_procs, ctrlr_proc, tailq); 982 983 return 0; 984 } 985 986 /** 987 * This function will be called when the process detaches the controller. 988 * Note: the ctrlr_lock must be held when calling this function. 989 */ 990 static void 991 nvme_ctrlr_remove_process(struct spdk_nvme_ctrlr *ctrlr, 992 struct spdk_nvme_ctrlr_process *proc) 993 { 994 struct spdk_nvme_qpair *qpair, *tmp_qpair; 995 996 assert(STAILQ_EMPTY(&proc->active_reqs)); 997 998 TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) { 999 spdk_nvme_ctrlr_free_io_qpair(qpair); 1000 } 1001 1002 TAILQ_REMOVE(&ctrlr->active_procs, proc, tailq); 1003 1004 spdk_dma_free(proc); 1005 } 1006 1007 /** 1008 * This function will be called when the process exited unexpectedly 1009 * in order to free any incomplete nvme request, allocated IO qpairs 1010 * and allocated memory. 1011 * Note: the ctrlr_lock must be held when calling this function. 1012 */ 1013 static void 1014 nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc) 1015 { 1016 struct nvme_request *req, *tmp_req; 1017 struct spdk_nvme_qpair *qpair, *tmp_qpair; 1018 1019 STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) { 1020 STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq); 1021 1022 assert(req->pid == proc->pid); 1023 1024 nvme_free_request(req); 1025 } 1026 1027 TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) { 1028 TAILQ_REMOVE(&proc->allocated_io_qpairs, qpair, per_process_tailq); 1029 1030 /* 1031 * The process may have been killed while some qpairs were in their 1032 * completion context. Clear that flag here to allow these IO 1033 * qpairs to be deleted. 1034 */ 1035 qpair->in_completion_context = 0; 1036 1037 qpair->no_deletion_notification_needed = 1; 1038 1039 spdk_nvme_ctrlr_free_io_qpair(qpair); 1040 } 1041 1042 spdk_dma_free(proc); 1043 } 1044 1045 /** 1046 * This function will be called when destructing the controller. 1047 * 1. There is no more admin request on this controller. 1048 * 2. Clean up any left resource allocation when its associated process is gone. 1049 */ 1050 void 1051 nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr) 1052 { 1053 struct spdk_nvme_ctrlr_process *active_proc, *tmp; 1054 1055 /* Free all the processes' properties and make sure no pending admin IOs */ 1056 TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) { 1057 TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq); 1058 1059 assert(STAILQ_EMPTY(&active_proc->active_reqs)); 1060 1061 spdk_dma_free(active_proc); 1062 } 1063 } 1064 1065 /** 1066 * This function will be called when any other process attaches or 1067 * detaches the controller in order to cleanup those unexpectedly 1068 * terminated processes. 1069 * Note: the ctrlr_lock must be held when calling this function. 1070 */ 1071 static int 1072 nvme_ctrlr_remove_inactive_proc(struct spdk_nvme_ctrlr *ctrlr) 1073 { 1074 struct spdk_nvme_ctrlr_process *active_proc, *tmp; 1075 int active_proc_count = 0; 1076 1077 TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) { 1078 if ((kill(active_proc->pid, 0) == -1) && (errno == ESRCH)) { 1079 SPDK_ERRLOG("process %d terminated unexpected\n", active_proc->pid); 1080 1081 TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq); 1082 1083 nvme_ctrlr_cleanup_process(active_proc); 1084 } else { 1085 active_proc_count++; 1086 } 1087 } 1088 1089 return active_proc_count; 1090 } 1091 1092 void 1093 nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr) 1094 { 1095 struct spdk_nvme_ctrlr_process *active_proc; 1096 pid_t pid = getpid(); 1097 1098 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 1099 1100 nvme_ctrlr_remove_inactive_proc(ctrlr); 1101 1102 TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) { 1103 if (active_proc->pid == pid) { 1104 active_proc->ref++; 1105 break; 1106 } 1107 } 1108 1109 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 1110 } 1111 1112 void 1113 nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr) 1114 { 1115 struct spdk_nvme_ctrlr_process *active_proc, *tmp; 1116 pid_t pid = getpid(); 1117 int proc_count; 1118 1119 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 1120 1121 proc_count = nvme_ctrlr_remove_inactive_proc(ctrlr); 1122 1123 TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) { 1124 if (active_proc->pid == pid) { 1125 active_proc->ref--; 1126 assert(active_proc->ref >= 0); 1127 1128 /* 1129 * The last active process will be removed at the end of 1130 * the destruction of the controller. 1131 */ 1132 if (active_proc->ref == 0 && proc_count != 1) { 1133 nvme_ctrlr_remove_process(ctrlr, active_proc); 1134 } 1135 1136 break; 1137 } 1138 } 1139 1140 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 1141 } 1142 1143 int 1144 nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr) 1145 { 1146 struct spdk_nvme_ctrlr_process *active_proc; 1147 int ref = 0; 1148 1149 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 1150 1151 nvme_ctrlr_remove_inactive_proc(ctrlr); 1152 1153 TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) { 1154 ref += active_proc->ref; 1155 } 1156 1157 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 1158 1159 return ref; 1160 } 1161 1162 /** 1163 * Get the PCI device handle which is only visible to its associated process. 1164 */ 1165 struct spdk_pci_device * 1166 nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr) 1167 { 1168 struct spdk_nvme_ctrlr_process *active_proc; 1169 pid_t pid = getpid(); 1170 struct spdk_pci_device *devhandle = NULL; 1171 1172 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 1173 1174 TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) { 1175 if (active_proc->pid == pid) { 1176 devhandle = active_proc->devhandle; 1177 break; 1178 } 1179 } 1180 1181 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 1182 1183 return devhandle; 1184 } 1185 1186 /** 1187 * This function will be called repeatedly during initialization until the controller is ready. 1188 */ 1189 int 1190 nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr) 1191 { 1192 union spdk_nvme_cc_register cc; 1193 union spdk_nvme_csts_register csts; 1194 uint32_t ready_timeout_in_ms; 1195 int rc; 1196 1197 /* 1198 * May need to avoid accessing any register on the target controller 1199 * for a while. Return early without touching the FSM. 1200 * Check sleep_timeout_tsc > 0 for unit test. 1201 */ 1202 if ((ctrlr->sleep_timeout_tsc > 0) && 1203 (spdk_get_ticks() <= ctrlr->sleep_timeout_tsc)) { 1204 return 0; 1205 } 1206 ctrlr->sleep_timeout_tsc = 0; 1207 1208 if (nvme_ctrlr_get_cc(ctrlr, &cc) || 1209 nvme_ctrlr_get_csts(ctrlr, &csts)) { 1210 if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) { 1211 /* While a device is resetting, it may be unable to service MMIO reads 1212 * temporarily. Allow for this case. 1213 */ 1214 SPDK_TRACELOG(SPDK_TRACE_NVME, "Get registers failed while waiting for CSTS.RDY == 0\n"); 1215 goto init_timeout; 1216 } 1217 SPDK_ERRLOG("Failed to read CC and CSTS in state %d\n", ctrlr->state); 1218 nvme_ctrlr_fail(ctrlr, false); 1219 return -EIO; 1220 } 1221 1222 ready_timeout_in_ms = 500 * ctrlr->cap.bits.to; 1223 1224 /* 1225 * Check if the current initialization step is done or has timed out. 1226 */ 1227 switch (ctrlr->state) { 1228 case NVME_CTRLR_STATE_INIT: 1229 /* Begin the hardware initialization by making sure the controller is disabled. */ 1230 if (cc.bits.en) { 1231 SPDK_TRACELOG(SPDK_TRACE_NVME, "CC.EN = 1\n"); 1232 /* 1233 * Controller is currently enabled. We need to disable it to cause a reset. 1234 * 1235 * If CC.EN = 1 && CSTS.RDY = 0, the controller is in the process of becoming ready. 1236 * Wait for the ready bit to be 1 before disabling the controller. 1237 */ 1238 if (csts.bits.rdy == 0) { 1239 SPDK_TRACELOG(SPDK_TRACE_NVME, "CC.EN = 1 && CSTS.RDY = 0 - waiting for reset to complete\n"); 1240 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1, ready_timeout_in_ms); 1241 return 0; 1242 } 1243 1244 /* CC.EN = 1 && CSTS.RDY == 1, so we can immediately disable the controller. */ 1245 SPDK_TRACELOG(SPDK_TRACE_NVME, "Setting CC.EN = 0\n"); 1246 cc.bits.en = 0; 1247 if (nvme_ctrlr_set_cc(ctrlr, &cc)) { 1248 SPDK_ERRLOG("set_cc() failed\n"); 1249 nvme_ctrlr_fail(ctrlr, false); 1250 return -EIO; 1251 } 1252 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms); 1253 1254 /* 1255 * Wait 2 secsonds before accessing PCI registers. 1256 * Not using sleep() to avoid blocking other controller's initialization. 1257 */ 1258 if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) { 1259 SPDK_TRACELOG(SPDK_TRACE_NVME, "Applying quirk: delay 2 seconds before reading registers\n"); 1260 ctrlr->sleep_timeout_tsc = spdk_get_ticks() + 2 * spdk_get_ticks_hz(); 1261 } 1262 return 0; 1263 } else { 1264 if (csts.bits.rdy == 1) { 1265 SPDK_TRACELOG(SPDK_TRACE_NVME, "CC.EN = 0 && CSTS.RDY = 1 - waiting for shutdown to complete\n"); 1266 } 1267 1268 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms); 1269 return 0; 1270 } 1271 break; 1272 1273 case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1: 1274 if (csts.bits.rdy == 1) { 1275 SPDK_TRACELOG(SPDK_TRACE_NVME, "CC.EN = 1 && CSTS.RDY = 1 - disabling controller\n"); 1276 /* CC.EN = 1 && CSTS.RDY = 1, so we can set CC.EN = 0 now. */ 1277 SPDK_TRACELOG(SPDK_TRACE_NVME, "Setting CC.EN = 0\n"); 1278 cc.bits.en = 0; 1279 if (nvme_ctrlr_set_cc(ctrlr, &cc)) { 1280 SPDK_ERRLOG("set_cc() failed\n"); 1281 nvme_ctrlr_fail(ctrlr, false); 1282 return -EIO; 1283 } 1284 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms); 1285 return 0; 1286 } 1287 break; 1288 1289 case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0: 1290 if (csts.bits.rdy == 0) { 1291 SPDK_TRACELOG(SPDK_TRACE_NVME, "CC.EN = 0 && CSTS.RDY = 0\n"); 1292 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE, ready_timeout_in_ms); 1293 /* 1294 * Delay 100us before setting CC.EN = 1. Some NVMe SSDs miss CC.EN getting 1295 * set to 1 if it is too soon after CSTS.RDY is reported as 0. 1296 */ 1297 spdk_delay_us(100); 1298 return 0; 1299 } 1300 break; 1301 1302 case NVME_CTRLR_STATE_ENABLE: 1303 SPDK_TRACELOG(SPDK_TRACE_NVME, "Setting CC.EN = 1\n"); 1304 rc = nvme_ctrlr_enable(ctrlr); 1305 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1, ready_timeout_in_ms); 1306 return rc; 1307 1308 case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1: 1309 if (csts.bits.rdy == 1) { 1310 SPDK_TRACELOG(SPDK_TRACE_NVME, "CC.EN = 1 && CSTS.RDY = 1 - controller is ready\n"); 1311 /* 1312 * The controller has been enabled. 1313 * Perform the rest of initialization in nvme_ctrlr_start() serially. 1314 */ 1315 rc = nvme_ctrlr_start(ctrlr); 1316 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE); 1317 return rc; 1318 } 1319 break; 1320 1321 default: 1322 assert(0); 1323 nvme_ctrlr_fail(ctrlr, false); 1324 return -1; 1325 } 1326 1327 init_timeout: 1328 if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE && 1329 spdk_get_ticks() > ctrlr->state_timeout_tsc) { 1330 SPDK_ERRLOG("Initialization timed out in state %d\n", ctrlr->state); 1331 nvme_ctrlr_fail(ctrlr, false); 1332 return -1; 1333 } 1334 1335 return 0; 1336 } 1337 1338 int 1339 nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr) 1340 { 1341 nvme_transport_qpair_reset(ctrlr->adminq); 1342 1343 nvme_qpair_enable(ctrlr->adminq); 1344 1345 if (nvme_ctrlr_identify(ctrlr) != 0) { 1346 return -1; 1347 } 1348 1349 if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) { 1350 return -1; 1351 } 1352 1353 if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) { 1354 return -1; 1355 } 1356 1357 if (nvme_ctrlr_configure_aer(ctrlr) != 0) { 1358 return -1; 1359 } 1360 1361 nvme_ctrlr_set_supported_log_pages(ctrlr); 1362 nvme_ctrlr_set_supported_features(ctrlr); 1363 1364 if (ctrlr->cdata.sgls.supported) { 1365 ctrlr->flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED; 1366 ctrlr->max_sges = nvme_transport_ctrlr_get_max_sges(ctrlr); 1367 } 1368 1369 if (nvme_ctrlr_set_keep_alive_timeout(ctrlr) != 0) { 1370 SPDK_ERRLOG("Setting keep alive timeout failed\n"); 1371 return -1; 1372 } 1373 1374 return 0; 1375 } 1376 1377 int 1378 nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx) 1379 { 1380 pthread_mutexattr_t attr; 1381 int rc = 0; 1382 1383 if (pthread_mutexattr_init(&attr)) { 1384 return -1; 1385 } 1386 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) || 1387 #ifndef __FreeBSD__ 1388 pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST) || 1389 pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) || 1390 #endif 1391 pthread_mutex_init(mtx, &attr)) { 1392 rc = -1; 1393 } 1394 pthread_mutexattr_destroy(&attr); 1395 return rc; 1396 } 1397 1398 int 1399 nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr) 1400 { 1401 int rc; 1402 1403 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE); 1404 ctrlr->flags = 0; 1405 ctrlr->free_io_qids = NULL; 1406 ctrlr->is_resetting = false; 1407 ctrlr->is_failed = false; 1408 1409 TAILQ_INIT(&ctrlr->active_io_qpairs); 1410 STAILQ_INIT(&ctrlr->queued_aborts); 1411 ctrlr->outstanding_aborts = 0; 1412 1413 rc = nvme_robust_mutex_init_recursive_shared(&ctrlr->ctrlr_lock); 1414 if (rc != 0) { 1415 return rc; 1416 } 1417 1418 TAILQ_INIT(&ctrlr->active_procs); 1419 ctrlr->timeout_cb_fn = NULL; 1420 ctrlr->timeout_cb_arg = NULL; 1421 ctrlr->timeout_ticks = 0; 1422 1423 return rc; 1424 } 1425 1426 /* This function should be called once at ctrlr initialization to set up constant properties. */ 1427 void 1428 nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_register *cap) 1429 { 1430 uint32_t max_io_queue_size = nvme_transport_ctrlr_get_max_io_queue_size(ctrlr); 1431 1432 ctrlr->cap = *cap; 1433 1434 ctrlr->min_page_size = 1u << (12 + ctrlr->cap.bits.mpsmin); 1435 1436 /* For now, always select page_size == min_page_size. */ 1437 ctrlr->page_size = ctrlr->min_page_size; 1438 1439 ctrlr->opts.io_queue_size = spdk_max(ctrlr->opts.io_queue_size, SPDK_NVME_IO_QUEUE_MIN_ENTRIES); 1440 ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u); 1441 ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, max_io_queue_size); 1442 1443 ctrlr->opts.io_queue_requests = spdk_max(ctrlr->opts.io_queue_requests, ctrlr->opts.io_queue_size); 1444 } 1445 1446 void 1447 nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr) 1448 { 1449 struct spdk_nvme_qpair *qpair, *tmp; 1450 1451 TAILQ_FOREACH_SAFE(qpair, &ctrlr->active_io_qpairs, tailq, tmp) { 1452 spdk_nvme_ctrlr_free_io_qpair(qpair); 1453 } 1454 1455 nvme_ctrlr_shutdown(ctrlr); 1456 1457 nvme_ctrlr_destruct_namespaces(ctrlr); 1458 1459 spdk_bit_array_free(&ctrlr->free_io_qids); 1460 1461 pthread_mutex_destroy(&ctrlr->ctrlr_lock); 1462 1463 nvme_transport_ctrlr_destruct(ctrlr); 1464 } 1465 1466 int 1467 nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr, 1468 struct nvme_request *req) 1469 { 1470 return nvme_qpair_submit_request(ctrlr->adminq, req); 1471 } 1472 1473 static void 1474 nvme_keep_alive_completion(void *cb_ctx, const struct spdk_nvme_cpl *cpl) 1475 { 1476 /* Do nothing */ 1477 } 1478 1479 /* 1480 * Check if we need to send a Keep Alive command. 1481 * Caller must hold ctrlr->ctrlr_lock. 1482 */ 1483 static void 1484 nvme_ctrlr_keep_alive(struct spdk_nvme_ctrlr *ctrlr) 1485 { 1486 uint64_t now; 1487 struct nvme_request *req; 1488 struct spdk_nvme_cmd *cmd; 1489 int rc; 1490 1491 now = spdk_get_ticks(); 1492 if (now < ctrlr->next_keep_alive_tick) { 1493 return; 1494 } 1495 1496 req = nvme_allocate_request_null(ctrlr->adminq, nvme_keep_alive_completion, NULL); 1497 if (req == NULL) { 1498 return; 1499 } 1500 1501 cmd = &req->cmd; 1502 cmd->opc = SPDK_NVME_OPC_KEEP_ALIVE; 1503 1504 rc = nvme_ctrlr_submit_admin_request(ctrlr, req); 1505 if (rc != 0) { 1506 SPDK_ERRLOG("Submitting Keep Alive failed\n"); 1507 } 1508 1509 ctrlr->next_keep_alive_tick = now + ctrlr->keep_alive_interval_ticks; 1510 } 1511 1512 int32_t 1513 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr) 1514 { 1515 int32_t num_completions; 1516 1517 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 1518 if (ctrlr->keep_alive_interval_ticks) { 1519 nvme_ctrlr_keep_alive(ctrlr); 1520 } 1521 num_completions = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0); 1522 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 1523 1524 return num_completions; 1525 } 1526 1527 const struct spdk_nvme_ctrlr_data * 1528 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr) 1529 { 1530 return &ctrlr->cdata; 1531 } 1532 1533 union spdk_nvme_csts_register spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr) 1534 { 1535 union spdk_nvme_csts_register csts; 1536 1537 if (nvme_ctrlr_get_csts(ctrlr, &csts)) { 1538 csts.raw = 0; 1539 } 1540 return csts; 1541 } 1542 1543 union spdk_nvme_cap_register spdk_nvme_ctrlr_get_regs_cap(struct spdk_nvme_ctrlr *ctrlr) 1544 { 1545 return ctrlr->cap; 1546 } 1547 1548 union spdk_nvme_vs_register spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr) 1549 { 1550 union spdk_nvme_vs_register vs; 1551 1552 if (nvme_ctrlr_get_vs(ctrlr, &vs)) { 1553 vs.raw = 0; 1554 } 1555 return vs; 1556 } 1557 1558 uint32_t 1559 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr) 1560 { 1561 return ctrlr->num_ns; 1562 } 1563 1564 struct spdk_nvme_ns * 1565 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t ns_id) 1566 { 1567 if (ns_id < 1 || ns_id > ctrlr->num_ns) { 1568 return NULL; 1569 } 1570 1571 return &ctrlr->ns[ns_id - 1]; 1572 } 1573 1574 void 1575 spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr, 1576 spdk_nvme_aer_cb aer_cb_fn, 1577 void *aer_cb_arg) 1578 { 1579 ctrlr->aer_cb_fn = aer_cb_fn; 1580 ctrlr->aer_cb_arg = aer_cb_arg; 1581 } 1582 1583 void 1584 spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr, 1585 uint32_t nvme_timeout, spdk_nvme_timeout_cb cb_fn, void *cb_arg) 1586 { 1587 ctrlr->timeout_ticks = nvme_timeout * spdk_get_ticks_hz(); 1588 ctrlr->timeout_cb_fn = cb_fn; 1589 ctrlr->timeout_cb_arg = cb_arg; 1590 } 1591 1592 bool 1593 spdk_nvme_ctrlr_is_log_page_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page) 1594 { 1595 /* No bounds check necessary, since log_page is uint8_t and log_page_supported has 256 entries */ 1596 SPDK_STATIC_ASSERT(sizeof(ctrlr->log_page_supported) == 256, "log_page_supported size mismatch"); 1597 return ctrlr->log_page_supported[log_page]; 1598 } 1599 1600 bool 1601 spdk_nvme_ctrlr_is_feature_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature_code) 1602 { 1603 /* No bounds check necessary, since feature_code is uint8_t and feature_supported has 256 entries */ 1604 SPDK_STATIC_ASSERT(sizeof(ctrlr->feature_supported) == 256, "feature_supported size mismatch"); 1605 return ctrlr->feature_supported[feature_code]; 1606 } 1607 1608 int 1609 spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, 1610 struct spdk_nvme_ctrlr_list *payload) 1611 { 1612 struct nvme_completion_poll_status status; 1613 int res; 1614 1615 status.done = false; 1616 res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload, 1617 nvme_completion_poll_cb, &status); 1618 if (res) 1619 return res; 1620 while (status.done == false) { 1621 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 1622 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0); 1623 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 1624 } 1625 if (spdk_nvme_cpl_is_error(&status.cpl)) { 1626 SPDK_ERRLOG("spdk_nvme_ctrlr_attach_ns failed!\n"); 1627 return -ENXIO; 1628 } 1629 1630 return spdk_nvme_ctrlr_reset(ctrlr); 1631 } 1632 1633 int 1634 spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, 1635 struct spdk_nvme_ctrlr_list *payload) 1636 { 1637 struct nvme_completion_poll_status status; 1638 int res; 1639 1640 status.done = false; 1641 res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload, 1642 nvme_completion_poll_cb, &status); 1643 if (res) 1644 return res; 1645 while (status.done == false) { 1646 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 1647 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0); 1648 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 1649 } 1650 if (spdk_nvme_cpl_is_error(&status.cpl)) { 1651 SPDK_ERRLOG("spdk_nvme_ctrlr_detach_ns failed!\n"); 1652 return -ENXIO; 1653 } 1654 1655 return spdk_nvme_ctrlr_reset(ctrlr); 1656 } 1657 1658 uint32_t 1659 spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload) 1660 { 1661 struct nvme_completion_poll_status status; 1662 int res; 1663 1664 status.done = false; 1665 res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, &status); 1666 if (res) 1667 return 0; 1668 while (status.done == false) { 1669 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 1670 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0); 1671 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 1672 } 1673 if (spdk_nvme_cpl_is_error(&status.cpl)) { 1674 SPDK_ERRLOG("spdk_nvme_ctrlr_create_ns failed!\n"); 1675 return 0; 1676 } 1677 1678 res = spdk_nvme_ctrlr_reset(ctrlr); 1679 if (res) { 1680 return 0; 1681 } 1682 1683 /* Return the namespace ID that was created */ 1684 return status.cpl.cdw0; 1685 } 1686 1687 int 1688 spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 1689 { 1690 struct nvme_completion_poll_status status; 1691 int res; 1692 1693 status.done = false; 1694 res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, &status); 1695 if (res) 1696 return res; 1697 while (status.done == false) { 1698 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 1699 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0); 1700 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 1701 } 1702 if (spdk_nvme_cpl_is_error(&status.cpl)) { 1703 SPDK_ERRLOG("spdk_nvme_ctrlr_delete_ns failed!\n"); 1704 return -ENXIO; 1705 } 1706 1707 return spdk_nvme_ctrlr_reset(ctrlr); 1708 } 1709 1710 int 1711 spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, 1712 struct spdk_nvme_format *format) 1713 { 1714 struct nvme_completion_poll_status status; 1715 int res; 1716 1717 status.done = false; 1718 res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb, 1719 &status); 1720 if (res) 1721 return res; 1722 while (status.done == false) { 1723 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 1724 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0); 1725 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 1726 } 1727 if (spdk_nvme_cpl_is_error(&status.cpl)) { 1728 SPDK_ERRLOG("spdk_nvme_ctrlr_format failed!\n"); 1729 return -ENXIO; 1730 } 1731 1732 return spdk_nvme_ctrlr_reset(ctrlr); 1733 } 1734 1735 int 1736 spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, uint32_t size, 1737 int slot, enum spdk_nvme_fw_commit_action commit_action, struct spdk_nvme_status *completion_status) 1738 { 1739 struct spdk_nvme_fw_commit fw_commit; 1740 struct nvme_completion_poll_status status; 1741 int res; 1742 unsigned int size_remaining; 1743 unsigned int offset; 1744 unsigned int transfer; 1745 void *p; 1746 1747 if (!completion_status) { 1748 return -EINVAL; 1749 } 1750 memset(completion_status, 0, sizeof(struct spdk_nvme_status)); 1751 if (size % 4) { 1752 SPDK_ERRLOG("spdk_nvme_ctrlr_update_firmware invalid size!\n"); 1753 return -1; 1754 } 1755 1756 /* Current support only for SPDK_NVME_FW_COMMIT_REPLACE_IMG 1757 * and SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG 1758 */ 1759 if ((commit_action != SPDK_NVME_FW_COMMIT_REPLACE_IMG) && 1760 (commit_action != SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG)) { 1761 SPDK_ERRLOG("spdk_nvme_ctrlr_update_firmware invalid command!\n"); 1762 return -1; 1763 } 1764 1765 /* Firmware download */ 1766 size_remaining = size; 1767 offset = 0; 1768 p = payload; 1769 1770 while (size_remaining > 0) { 1771 transfer = spdk_min(size_remaining, ctrlr->min_page_size); 1772 status.done = false; 1773 1774 res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p, 1775 nvme_completion_poll_cb, 1776 &status); 1777 if (res) 1778 return res; 1779 1780 while (status.done == false) { 1781 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 1782 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0); 1783 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 1784 } 1785 if (spdk_nvme_cpl_is_error(&status.cpl)) { 1786 SPDK_ERRLOG("spdk_nvme_ctrlr_fw_image_download failed!\n"); 1787 return -ENXIO; 1788 } 1789 p += transfer; 1790 offset += transfer; 1791 size_remaining -= transfer; 1792 } 1793 1794 /* Firmware commit */ 1795 memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit)); 1796 fw_commit.fs = slot; 1797 fw_commit.ca = commit_action; 1798 1799 status.done = false; 1800 1801 res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb, 1802 &status); 1803 if (res) 1804 return res; 1805 1806 while (status.done == false) { 1807 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 1808 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0); 1809 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 1810 } 1811 memcpy(completion_status, &status.cpl.status, sizeof(struct spdk_nvme_status)); 1812 if (spdk_nvme_cpl_is_error(&status.cpl)) { 1813 if (status.cpl.status.sct != SPDK_NVME_SCT_COMMAND_SPECIFIC || 1814 status.cpl.status.sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) { 1815 if (status.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC && 1816 status.cpl.status.sc == SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET) { 1817 SPDK_NOTICELOG("firmware activation requires conventional reset to be performed. !\n"); 1818 } else { 1819 SPDK_ERRLOG("nvme_ctrlr_cmd_fw_commit failed!\n"); 1820 } 1821 return -ENXIO; 1822 } 1823 } 1824 1825 return spdk_nvme_ctrlr_reset(ctrlr); 1826 } 1827