Lines matching "needs-reset-on-resume"
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (C) 2012-2016 Intel Corporation
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 bus_barrier(ctrlr->resource, 0, rman_get_size(ctrlr->resource), flags);
68 sbuf_printf(&sb, "name=\"%s\" ", device_get_nameunit(ctrlr->dev));
95 sbuf_printf(&sb, "%s: ", device_get_nameunit(ctrlr->dev));
115 qpair = &ctrlr->adminq;
116 qpair->id = 0;
117 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
118 qpair->domain = ctrlr->domain;
135 * max I/O xfer size. 16KB is sufficient here - maybe even less?
142 #define QP(ctrlr, c) ((c) * (ctrlr)->num_io_queues / mp_ncpus)
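/*
 * Worked example of the QP() cpu-to-queue mapping above, using
 * hypothetical values: with mp_ncpus = 16 and num_io_queues = 4,
 * CPUs 0-3 map to queue 0, 4-7 to queue 1, 8-11 to queue 2 and
 * 12-15 to queue 3, e.g. QP(ctrlr, 5) = 5 * 4 / 16 = 1.
 */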
159 * fail-safe.
162 (rman_get_size(ctrlr->resource) - nvme_mmio_offsetof(doorbell[0])) /
163 (1 << (ctrlr->dstrd + 1));
178 * also that for a queue size of N, we can only have (N-1) commands
179 * outstanding, hence the "-1" here.
181 num_trackers = min(num_trackers, (num_entries-1));
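/*
 * Worked example of the two limits above, using hypothetical values:
 * with doorbell[0] at the spec-mandated offset 0x1000, a 16 KB register
 * window and dstrd == 2 (i.e. CAP.DSTRD == 0; see the "+ 2" where dstrd
 * is read from cap_hi below), each queue pair needs 1 << (2 + 1) = 8
 * bytes of doorbell space (SQ tail + CQ head), so (16384 - 4096) / 8 =
 * 1536 queue pairs fit.  Independently, a queue created with
 * num_entries == 256 can keep at most 255 commands in flight, hence the
 * num_entries - 1 clamp.
 */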
187 * of the storage system grow multi-queue support.
189 ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
191 ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
194 for (i = c = n = 0; i < ctrlr->num_io_queues; i++, c += n) {
195 qpair = &ctrlr->ioq[i];
198 * Admin queue has ID=0. IO queues start at ID=1 -
201 qpair->id = i + 1;
202 if (ctrlr->num_io_queues > 1) {
207 qpair->cpu = c + (device_get_unit(ctrlr->dev)+n/2) % n;
208 qpair->domain = pcpu_find(qpair->cpu)->pc_domain;
210 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
211 qpair->domain = ctrlr->domain;
215 * For I/O queues, use the controller-wide max_xfer_size
227 if (ctrlr->num_io_queues > 1)
228 bus_bind_intr(ctrlr->dev, qpair->res, qpair->cpu);
244 ctrlr->is_failed = true;
246 ctrlr->is_failed_admin = true;
247 nvme_qpair_fail(&ctrlr->adminq);
249 if (ctrlr->ioq != NULL) {
250 for (i = 0; i < ctrlr->num_io_queues; i++) {
251 nvme_qpair_fail(&ctrlr->ioq[i]);
266 int timeout = ticks + MSEC_2_TICKS(ctrlr->ready_timeout_in_ms);
276 if (timeout - ticks < 0) {
278 "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
329 if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
364 nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
365 nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
367 /* acqs and asqs are 0-based. */
368 qsize = ctrlr->adminq.num_entries - 1;
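/*
 * A minimal sketch of how the two 0-based sizes are then packed into the
 * AQA register (per the NVMe spec, ASQS occupies bits 11:0 and ACQS bits
 * 27:16); the exact field-encoding helpers used by the driver may differ:
 *
 *	aqa = (qsize & 0xfff) | ((qsize & 0xfff) << 16);
 *	nvme_mmio_write_4(ctrlr, aqa, aqa);
 */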
389 cc |= NVMEF(NVME_CC_REG_MPS, ctrlr->mps);
402 nvme_admin_qpair_disable(&ctrlr->adminq);
405 * reset, so do not try to disable them. Use is_initialized
406 * to determine if this is the initial HW reset.
408 if (ctrlr->is_initialized) {
409 for (i = 0; i < ctrlr->num_io_queues; i++)
410 nvme_io_qpair_disable(&ctrlr->ioq[i]);
421 ctrlr->is_failed_admin = true;
431 ctrlr->is_failed_admin = false;
442 cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
447 * there is no need to kick off another reset.
451 if (!ctrlr->is_dying)
452 taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
461 nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
470 nvme_controller_data_swapbytes(&ctrlr->cdata);
476 if (ctrlr->cdata.mdts > 0)
477 ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
478 1 << (ctrlr->cdata.mdts + NVME_MPS_SHIFT +
479 NVME_CAP_HI_MPSMIN(ctrlr->cap_hi)));
491 nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
500 * Data in cdw0 is 0-based.
501 * Lower 16-bits indicate number of submission queues allocated.
502 * Upper 16-bits indicate number of completion queues allocated.
512 ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
513 ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
514 if (ctrlr->num_io_queues > vm_ndomains)
515 ctrlr->num_io_queues -= ctrlr->num_io_queues % vm_ndomains;
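/*
 * A hypothetical helper illustrating the cdw0 decoding described above
 * (not part of the driver); both counts come back 0-based, so 1 is added
 * to get the number of queues actually granted:
 */
static inline void
decode_num_queues_cdw0(uint32_t cdw0, uint32_t *sq_allocated, uint32_t *cq_allocated)
{
	*sq_allocated = (cdw0 & 0xffff) + 1;	/* bits 15:0: submission queues */
	*cq_allocated = (cdw0 >> 16) + 1;	/* bits 31:16: completion queues */
}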
527 for (i = 0; i < ctrlr->num_io_queues; i++) {
528 qpair = &ctrlr->ioq[i];
558 for (int i = 0; i < ctrlr->num_io_queues; i++) {
559 qpair = &ctrlr->ioq[i];
589 for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
590 ns = &ctrlr->ns[i];
624 (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
697 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
698 aer->log_page_id, NULL, 0);
701 switch (aer->log_page_id) {
703 err = (struct nvme_error_information_entry *)aer->log_page_buffer;
704 for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
709 (struct nvme_health_information_page *)aer->log_page_buffer);
713 (struct nvme_ns_list *)aer->log_page_buffer);
717 (struct nvme_command_effects_page *)aer->log_page_buffer);
721 (struct nvme_res_notification_page *)aer->log_page_buffer);
725 (struct nvme_sanitize_status_page *)aer->log_page_buffer);
731 if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
733 aer->log_page_buffer;
734 nvme_ctrlr_log_critical_warnings(aer->ctrlr,
735 health_info->critical_warning);
743 aer->ctrlr->async_event_config &=
744 ~health_info->critical_warning;
745 nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
746 aer->ctrlr->async_event_config, NULL, NULL);
747 } else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
749 nsl = (struct nvme_ns_list *)aer->log_page_buffer;
750 for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
751 if (nsl->ns[i] > NVME_MAX_NAMESPACES)
753 nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
761 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
762 aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
769 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
788 aer->log_page_id = NVMEV(NVME_ASYNC_EVENT_LOG_PAGE_ID, cpl->cdw0);
790 nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
791 " page 0x%02x)\n", NVMEV(NVME_ASYNC_EVENT_TYPE, cpl->cdw0),
792 NVMEV(NVME_ASYNC_EVENT_INFO, cpl->cdw0),
793 aer->log_page_id);
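/*
 * For example, a cdw0 of 0x00020101 decodes as event type 0x1
 * (SMART/health status), event information 0x01 (temperature threshold)
 * and log page 0x02 (Health Information), per the NVMe spec layout of
 * the AER completion dword 0 (type in bits 2:0, info in bits 15:8,
 * log page ID in bits 23:16).
 */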
795 if (is_log_page_id_valid(aer->log_page_id)) {
796 aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
797 aer->log_page_id);
798 memcpy(&aer->cpl, cpl, sizeof(*cpl));
799 nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
800 NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
801 aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
805 nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
812 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
822 aer->ctrlr = ctrlr;
824 * XXX-MJ this should be M_WAITOK but we might be in a non-sleepable
825 * callback context. AER completions should be handled on a dedicated
830 aer->req = req;
836 req->timeout = false;
837 req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
848 ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
852 if (ctrlr->cdata.ver >= NVME_REV(1, 2))
853 ctrlr->async_event_config |=
854 ctrlr->cdata.oaes & (NVME_ASYNC_EVENT_NS_ATTRIBUTE |
866 ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;
869 ctrlr->async_event_config, NULL, NULL);
871 /* aerl is a zero-based value, so we need to add 1 here. */
872 ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
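/*
 * For example, a controller reporting cdata.aerl == 3 supports four
 * simultaneous asynchronous event requests, so num_aers becomes
 * min(NVME_MAX_ASYNC_EVENTS, 4).
 */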
874 for (i = 0; i < ctrlr->num_aers; i++) {
875 aer = &ctrlr->aer[i];
884 ctrlr->int_coal_time = 0;
886 &ctrlr->int_coal_time);
888 ctrlr->int_coal_threshold = 0;
890 &ctrlr->int_coal_threshold);
892 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
893 ctrlr->int_coal_threshold, NULL, NULL);
902 if (ctrlr->hmb_desc_paddr) {
903 bus_dmamap_unload(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map);
904 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
905 ctrlr->hmb_desc_map);
906 ctrlr->hmb_desc_paddr = 0;
908 if (ctrlr->hmb_desc_tag) {
909 bus_dma_tag_destroy(ctrlr->hmb_desc_tag);
910 ctrlr->hmb_desc_tag = NULL;
912 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
913 hmbc = &ctrlr->hmb_chunks[i];
914 bus_dmamap_unload(ctrlr->hmb_tag, hmbc->hmbc_map);
915 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
916 hmbc->hmbc_map);
918 ctrlr->hmb_nchunks = 0;
919 if (ctrlr->hmb_tag) {
920 bus_dma_tag_destroy(ctrlr->hmb_tag);
921 ctrlr->hmb_tag = NULL;
923 if (ctrlr->hmb_chunks) {
924 free(ctrlr->hmb_chunks, M_NVME);
925 ctrlr->hmb_chunks = NULL;
945 min = (long long unsigned)ctrlr->cdata.hmmin * NVME_HMB_UNITS;
948 pref = MIN((long long unsigned)ctrlr->cdata.hmpre * NVME_HMB_UNITS, max);
949 minc = MAX(ctrlr->cdata.hmminds * NVME_HMB_UNITS, ctrlr->page_size);
950 if (min > 0 && ctrlr->cdata.hmmaxd > 0)
951 minc = MAX(minc, min / ctrlr->cdata.hmmaxd);
952 ctrlr->hmb_chunk = pref;
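/*
 * Worked example of the sizing above, using hypothetical identify data:
 * hmpre = 8192 and hmmin = 2048 (both in 4 KB NVME_HMB_UNITS) give a
 * preferred HMB of 32 MB and a required minimum of 8 MB; with
 * hmminds == 0, hmmaxd == 8 and a 4 KB page size, minc becomes
 * MAX(4 KB, 8 MB / 8) = 1 MB, so no chunk smaller than 1 MB is worth
 * allocating.
 */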
957 * are all based on the current MPS (ctrlr->page_size).
959 ctrlr->hmb_chunk = roundup2(ctrlr->hmb_chunk, ctrlr->page_size);
960 ctrlr->hmb_nchunks = howmany(pref, ctrlr->hmb_chunk);
961 if (ctrlr->cdata.hmmaxd > 0 && ctrlr->hmb_nchunks > ctrlr->cdata.hmmaxd)
962 ctrlr->hmb_nchunks = ctrlr->cdata.hmmaxd;
963 ctrlr->hmb_chunks = malloc(sizeof(struct nvme_hmb_chunk) *
964 ctrlr->hmb_nchunks, M_NVME, M_WAITOK);
965 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
966 ctrlr->page_size, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
967 ctrlr->hmb_chunk, 1, ctrlr->hmb_chunk, 0, NULL, NULL, &ctrlr->hmb_tag);
974 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
975 hmbc = &ctrlr->hmb_chunks[i];
976 if (bus_dmamem_alloc(ctrlr->hmb_tag,
977 (void **)&hmbc->hmbc_vaddr, BUS_DMA_NOWAIT,
978 &hmbc->hmbc_map)) {
982 if (bus_dmamap_load(ctrlr->hmb_tag, hmbc->hmbc_map,
983 hmbc->hmbc_vaddr, ctrlr->hmb_chunk, nvme_single_map,
984 &hmbc->hmbc_paddr, BUS_DMA_NOWAIT) != 0) {
985 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
986 hmbc->hmbc_map);
990 bus_dmamap_sync(ctrlr->hmb_tag, hmbc->hmbc_map,
994 if (i < ctrlr->hmb_nchunks && i * ctrlr->hmb_chunk < min &&
995 ctrlr->hmb_chunk / 2 >= minc) {
996 ctrlr->hmb_nchunks = i;
998 ctrlr->hmb_chunk /= 2;
1001 ctrlr->hmb_nchunks = i;
1002 if (ctrlr->hmb_nchunks * ctrlr->hmb_chunk < min) {
1007 size = sizeof(struct nvme_hmb_desc) * ctrlr->hmb_nchunks;
1008 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
1010 size, 1, size, 0, NULL, NULL, &ctrlr->hmb_desc_tag);
1016 if (bus_dmamem_alloc(ctrlr->hmb_desc_tag,
1017 (void **)&ctrlr->hmb_desc_vaddr, BUS_DMA_WAITOK,
1018 &ctrlr->hmb_desc_map)) {
1023 if (bus_dmamap_load(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
1024 ctrlr->hmb_desc_vaddr, size, nvme_single_map,
1025 &ctrlr->hmb_desc_paddr, BUS_DMA_NOWAIT) != 0) {
1026 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
1027 ctrlr->hmb_desc_map);
1033 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
1034 memset(&ctrlr->hmb_desc_vaddr[i], 0,
1036 ctrlr->hmb_desc_vaddr[i].addr =
1037 htole64(ctrlr->hmb_chunks[i].hmbc_paddr);
1038 ctrlr->hmb_desc_vaddr[i].size = htole32(ctrlr->hmb_chunk / ctrlr->page_size);
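/*
 * Each descriptor entry filled above is 16 bytes and follows the NVMe
 * spec's Host Memory Buffer Descriptor Entry layout; a minimal sketch of
 * the structure being assumed here (the addr and size fields match the
 * assignments above, the trailing reserved word is per the spec):
 */
struct nvme_hmb_desc_sketch {
	uint64_t	addr;		/* page-aligned buffer address (little-endian) */
	uint32_t	size;		/* buffer size, in page_size units */
	uint32_t	reserved;
};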
1040 bus_dmamap_sync(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
1044 (long long unsigned)ctrlr->hmb_nchunks * ctrlr->hmb_chunk
1061 ctrlr->hmb_nchunks * ctrlr->hmb_chunk / ctrlr->page_size,
1062 ctrlr->hmb_desc_paddr, ctrlr->hmb_desc_paddr >> 32,
1063 ctrlr->hmb_nchunks, NULL, 0,
1080 * Only reset adminq here when we are restarting the
1081 * controller after a reset. During initialization,
1083 * the number of I/O queues supported, so cannot reset
1087 nvme_qpair_reset(&ctrlr->adminq);
1088 nvme_admin_qpair_enable(&ctrlr->adminq);
1091 if (ctrlr->ioq != NULL) {
1092 for (i = 0; i < ctrlr->num_io_queues; i++)
1093 nvme_qpair_reset(&ctrlr->ioq[i]);
1097 * If it was a reset on initialization command timeout, just
1100 if (resetting && !ctrlr->is_initialized)
1112 * after any reset for controllers that depend on the driver to
1117 old_num_io_queues = ctrlr->num_io_queues;
1123 if (old_num_io_queues != ctrlr->num_io_queues) {
1125 old_num_io_queues, ctrlr->num_io_queues);
1129 if (ctrlr->cdata.hmpre > 0 && ctrlr->hmb_nchunks == 0) {
1131 if (ctrlr->hmb_nchunks > 0)
1133 } else if (ctrlr->hmb_nchunks > 0)
1149 for (i = 0; i < ctrlr->num_io_queues; i++)
1150 nvme_io_qpair_enable(&ctrlr->ioq[i]);
1161 if (nvme_ctrlr_hw_reset(ctrlr) != 0 || ctrlr->fail_on_reset != 0) {
1163 config_intrhook_disestablish(&ctrlr->config_hook);
1167 nvme_qpair_reset(&ctrlr->adminq);
1168 nvme_admin_qpair_enable(&ctrlr->adminq);
1178 config_intrhook_disestablish(&ctrlr->config_hook);
1180 if (!ctrlr->is_failed) {
1181 ctrlr->is_initialized = true;
1193 nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"start\"");
1196 nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"success\"");
1199 nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"timed_out\"");
1203 atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1207 * Poll all the queues enabled on the device for completion.
1214 nvme_qpair_process_completions(&ctrlr->adminq);
1216 for (i = 0; i < ctrlr->num_io_queues; i++)
1217 if (ctrlr->ioq && ctrlr->ioq[i].cpl)
1218 nvme_qpair_process_completions(&ctrlr->ioq[i]);
1222 * Poll the single-vector interrupt case: num_io_queues will be 1 and
1240 struct mtx *mtx = pt->driver_lock;
1243 bzero(&pt->cpl, sizeof(pt->cpl));
1244 pt->cpl.cdw0 = cpl->cdw0;
1246 status = cpl->status;
1248 pt->cpl.status = status;
1251 pt->driver_lock = NULL;
1266 if (pt->len > 0) {
1267 if (pt->len > ctrlr->max_xfer_size) {
1268 nvme_printf(ctrlr, "pt->len (%d) "
1269 "exceeds max_xfer_size (%d)\n", pt->len,
1270 ctrlr->max_xfer_size);
1275 buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
1276 if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) {
1280 req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
1283 req = nvme_allocate_request_vaddr(pt->buf, pt->len,
1288 /* Assume user space already converted to little-endian */
1289 req->cmd.opc = pt->cmd.opc;
1290 req->cmd.fuse = pt->cmd.fuse;
1291 req->cmd.rsvd2 = pt->cmd.rsvd2;
1292 req->cmd.rsvd3 = pt->cmd.rsvd3;
1293 req->cmd.cdw10 = pt->cmd.cdw10;
1294 req->cmd.cdw11 = pt->cmd.cdw11;
1295 req->cmd.cdw12 = pt->cmd.cdw12;
1296 req->cmd.cdw13 = pt->cmd.cdw13;
1297 req->cmd.cdw14 = pt->cmd.cdw14;
1298 req->cmd.cdw15 = pt->cmd.cdw15;
1300 req->cmd.nsid = htole32(nsid);
1303 pt->driver_lock = mtx;
1311 while (pt->driver_lock != NULL)
1328 struct mtx *mtx = (void *)(uintptr_t)npc->metadata;
1330 npc->result = cpl->cdw0; /* cpl in host order by now */
1332 npc->metadata = 0;
1351 if (npc->metadata != 0 || npc->metadata_len != 0)
1354 if (npc->data_len > 0 && npc->addr != 0) {
1355 if (npc->data_len > ctrlr->max_xfer_size) {
1357 "npc->data_len (%d) exceeds max_xfer_size (%d)\n",
1358 npc->data_len, ctrlr->max_xfer_size);
1362 if ((npc->opcode & 0x3) == 0 || (npc->opcode & 0x3) == 3)
1366 buf->b_iocmd = npc->opcode & 1 ? BIO_WRITE : BIO_READ;
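/*
 * Bits 1:0 of an NVMe opcode encode the data transfer direction
 * (00b = no data, 01b = host-to-controller, 10b = controller-to-host,
 * 11b = bidirectional), which is why values 0 and 3 are rejected above
 * and why bit 0 alone is enough to choose BIO_WRITE versus BIO_READ.
 */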
1367 if (vmapbuf(buf, (void *)(uintptr_t)npc->addr,
1368 npc->data_len, 1) < 0) {
1372 req = nvme_allocate_request_vaddr(buf->b_data,
1373 npc->data_len, M_WAITOK, nvme_npc_done, npc);
1376 (void *)(uintptr_t)npc->addr, npc->data_len,
1381 req->cmd.opc = npc->opcode;
1382 req->cmd.fuse = npc->flags;
1383 req->cmd.rsvd2 = htole16(npc->cdw2);
1384 req->cmd.rsvd3 = htole16(npc->cdw3);
1385 req->cmd.cdw10 = htole32(npc->cdw10);
1386 req->cmd.cdw11 = htole32(npc->cdw11);
1387 req->cmd.cdw12 = htole32(npc->cdw12);
1388 req->cmd.cdw13 = htole32(npc->cdw13);
1389 req->cmd.cdw14 = htole32(npc->cdw14);
1390 req->cmd.cdw15 = htole32(npc->cdw15);
1392 req->cmd.nsid = htole32(nsid);
1395 npc->metadata = (uintptr_t) mtx;
1404 while (npc->metadata != 0)
1424 ctrlr = cdev->si_drv1;
1433 return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
1438 strlcpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
1439 sizeof(gnsid->cdev));
1440 gnsid->nsid = 0;
1444 *(uint64_t *)arg = ctrlr->max_xfer_size;
1448 td->td_retval[0] = 0xfffffffful;
1455 return (nvme_ctrlr_linux_passthru_cmd(ctrlr, npc, npc->nsid, true,
1481 ctrlr->dev = dev;
1483 mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
1484 if (bus_get_domain(dev, &ctrlr->domain) != 0)
1485 ctrlr->domain = 0;
1487 ctrlr->cap_lo = cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
1497 ctrlr->cap_hi = cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
1532 ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2;
1534 ctrlr->mps = NVME_CAP_HI_MPSMIN(cap_hi);
1535 ctrlr->page_size = 1 << (NVME_MPS_SHIFT + ctrlr->mps);
1539 ctrlr->ready_timeout_in_ms = to * 500;
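/*
 * CAP.TO is expressed in 500 ms units, so e.g. a controller reporting
 * TO == 30 is allowed up to 15 seconds to become ready after CC.EN is
 * set.
 */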
1545 ctrlr->admin_timeout_period = timeout_period;
1551 ctrlr->timeout_period = timeout_period;
1556 ctrlr->enable_aborts = 0;
1557 TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
1559 ctrlr->alignment_splits = counter_u64_alloc(M_WAITOK);
1561 /* Cap transfers by the maximum addressable by page-sized PRP (4KB pages -> 2MB). */
1562 ctrlr->max_xfer_size = MIN(maxphys, (ctrlr->page_size / 8 * ctrlr->page_size));
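/*
 * page_size / 8 is the number of 8-byte PRP entries that fit in one
 * page-sized PRP list; with 4 KB pages that is 512 entries describing
 * 4 KB each, i.e. the 2 MB noted above.
 */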
1567 * Create 2 threads for the taskqueue. The reset thread will block when
1569 * failed up the stack. The fail_req task needs to be able to run in
1577 ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1578 taskqueue_thread_enqueue, &ctrlr->taskqueue);
1579 taskqueue_start_threads(&ctrlr->taskqueue, 2, PI_DISK, "nvme taskq");
1581 ctrlr->is_resetting = 0;
1582 ctrlr->is_initialized = false;
1583 ctrlr->notification_sent = 0;
1584 TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1585 STAILQ_INIT(&ctrlr->fail_req);
1586 ctrlr->is_failed = false;
1595 status = make_dev_s(&md_args, &ctrlr->cdev, "%s",
1604 * Called on detach, or on error on attach. The nvme_controller won't be used
1606 * references this, no callbacks, etc), but don't need to reset all the state
1614 ctrlr->is_dying = true;
1616 if (ctrlr->resource == NULL)
1618 if (!mtx_initialized(&ctrlr->adminq.lock))
1632 nvme_ns_destruct(&ctrlr->ns[i]);
1634 if (ctrlr->cdev)
1635 destroy_dev(ctrlr->cdev);
1637 if (ctrlr->is_initialized) {
1639 if (ctrlr->hmb_nchunks > 0)
1645 if (ctrlr->ioq != NULL) {
1646 for (i = 0; i < ctrlr->num_io_queues; i++)
1647 nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1648 free(ctrlr->ioq, M_NVME);
1650 nvme_admin_qpair_destroy(&ctrlr->adminq);
1666 if (ctrlr->taskqueue)
1667 taskqueue_free(ctrlr->taskqueue);
1669 if (ctrlr->tag)
1670 bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1672 if (ctrlr->res)
1673 bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1674 rman_get_rid(ctrlr->res), ctrlr->res);
1676 if (ctrlr->bar4_resource != NULL) {
1678 ctrlr->bar4_resource_id, ctrlr->bar4_resource);
1682 ctrlr->resource_id, ctrlr->resource);
1685 if (ctrlr->alignment_splits)
1686 counter_u64_free(ctrlr->alignment_splits);
1688 mtx_destroy(&ctrlr->lock);
1703 timeout = ticks + (ctrlr->cdata.rtd3e == 0 ? 5 * hz :
1704 ((uint64_t)ctrlr->cdata.rtd3e * hz + 999999) / 1000000);
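/*
 * cdata.rtd3e is in microseconds, rounded up to ticks here; e.g.
 * rtd3e == 10000000 with hz == 1000 yields a 10000-tick (10 second)
 * shutdown timeout, while rtd3e == 0 falls back to 5 * hz.
 */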
1711 if (timeout - ticks < 0) {
1724 nvme_qpair_submit_request(&ctrlr->adminq, req);
1733 qpair = &ctrlr->ioq[QP(ctrlr, curcpu)];
1741 return (ctrlr->dev);
1748 return (&ctrlr->cdata);
1758 * need to do an explicit reset to bring it back, if that's even
1761 if (ctrlr->is_failed)
1765 * We don't want the reset taskqueue running, since it does similar
1766 * things, so prevent it from running after we start. Wait for any reset
1767 * that may have been started to complete. The reset process we follow
1769 * after we resume (though there should be none).
1771 while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0)
1775 "Competing reset task didn't finish. Try again later.\n");
1779 if (ctrlr->hmb_nchunks > 0)
1802 * Can't touch failed controllers, so nothing to do to resume.
1804 if (ctrlr->is_failed)
1811 * Now that we've reset the hardware, we can restart the controller. Any
1814 * in the reset phase.
1817 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1822 * Since we can't bring the controller out of reset, announce and fail
1823 * the controller. However, we have to return success for the resume
1826 nvme_printf(ctrlr, "Failed to reset on resume, failing.\n");
1828 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);