Lines Matching defs:ctrlr
50 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
54 nvme_ctrlr_barrier(struct nvme_controller *ctrlr, int flags)
56 bus_barrier(ctrlr->resource, 0, rman_get_size(ctrlr->resource), flags);
60 nvme_ctrlr_devctl_va(struct nvme_controller *ctrlr, const char *type,
68 sbuf_printf(&sb, "name=\"%s\" ", device_get_nameunit(ctrlr->dev));
77 nvme_ctrlr_devctl(struct nvme_controller *ctrlr, const char *type, const char *msg, ...)
82 nvme_ctrlr_devctl_va(ctrlr, type, msg, ap);
87 nvme_ctrlr_devctl_log(struct nvme_controller *ctrlr, const char *type, const char *msg, ...)
95 sbuf_printf(&sb, "%s: ", device_get_nameunit(ctrlr->dev));
104 nvme_ctrlr_devctl_va(ctrlr, type, msg, ap);
109 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
115 qpair = &ctrlr->adminq;
117 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
118 qpair->domain = ctrlr->domain;
128 nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
138 ctrlr);
142 #define QP(ctrlr, c) ((c) * (ctrlr)->num_io_queues / mp_ncpus)
145 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
162 (rman_get_size(ctrlr->resource) - nvme_mmio_offsetof(doorbell[0])) /
163 (1 << (ctrlr->dstrd + 1));
166 cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
189 ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
191 ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
194 for (i = c = n = 0; i < ctrlr->num_io_queues; i++, c += n) {
195 qpair = &ctrlr->ioq[i];
202 if (ctrlr->num_io_queues > 1) {
204 for (n = 1; QP(ctrlr, c + n) == i; n++)
207 qpair->cpu = c + (device_get_unit(ctrlr->dev)+n/2) % n;
210 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
211 qpair->domain = ctrlr->domain;
219 ctrlr);
227 if (ctrlr->num_io_queues > 1)
228 bus_bind_intr(ctrlr->dev, qpair->res, qpair->cpu);
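The QP() macro and the loop in nvme_ctrlr_construct_io_qpairs spread the host's CPUs proportionally across the I/O queues and then bind each queue's interrupt to one CPU near the middle of its share. A minimal user-space sketch of that mapping arithmetic, with made-up values for the CPU count, queue count, and device unit (none of this is driver state):

    #include <stdio.h>

    /* Same proportional CPU-to-queue mapping as the driver's QP() macro. */
    #define QP(ncpus, nqueues, c)   ((c) * (nqueues) / (ncpus))

    int
    main(void)
    {
            int ncpus = 8, nqueues = 3, unit = 0;   /* assumed example values */

            for (int i = 0, c = 0, n = 0; i < nqueues; i++, c += n) {
                    /* Count the consecutive CPUs that map to queue i... */
                    for (n = 1; c + n < ncpus && QP(ncpus, nqueues, c + n) == i; n++)
                            ;
                    /* ...and pick one of them for the queue's interrupt. */
                    int cpu = c + (unit + n / 2) % n;
                    printf("ioq[%d]: cpus %d-%d, irq bound to cpu %d\n",
                        i, c, c + n - 1, cpu);
            }
            return (0);
    }

With 8 CPUs and 3 queues this prints CPU ranges 0-2, 3-5, and 6-7, matching how bus_bind_intr() is then called once per queue.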
235 nvme_ctrlr_fail(struct nvme_controller *ctrlr, bool admin_also)
244 ctrlr->is_failed = true;
246 ctrlr->is_failed_admin = true;
247 nvme_qpair_fail(&ctrlr->adminq);
249 if (ctrlr->ioq != NULL) {
250 for (i = 0; i < ctrlr->num_io_queues; i++) {
251 nvme_qpair_fail(&ctrlr->ioq[i]);
254 nvme_notify_fail_consumers(ctrlr);
264 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
266 int timeout = ticks + MSEC_2_TICKS(ctrlr->ready_timeout_in_ms);
271 csts = nvme_mmio_read_4(ctrlr, csts);
277 nvme_printf(ctrlr, "controller ready did not become %d "
278 "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
290 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
297 cc = nvme_mmio_read_4(ctrlr, cc);
298 csts = nvme_mmio_read_4(ctrlr, csts);
313 return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
317 err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
323 nvme_mmio_write_4(ctrlr, cc, cc);
329 if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
331 return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
335 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
344 cc = nvme_mmio_read_4(ctrlr, cc);
345 csts = nvme_mmio_read_4(ctrlr, csts);
356 return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
360 err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
364 nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
365 nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
368 qsize = ctrlr->adminq.num_entries - 1;
373 nvme_mmio_write_4(ctrlr, aqa, aqa);
389 cc |= NVMEF(NVME_CC_REG_MPS, ctrlr->mps);
391 nvme_ctrlr_barrier(ctrlr, BUS_SPACE_BARRIER_WRITE);
392 nvme_mmio_write_4(ctrlr, cc, cc);
394 return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
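nvme_ctrlr_disable and nvme_ctrlr_enable both end in nvme_ctrlr_wait_for_ready, which polls CSTS.RDY until it reaches the desired value or the CAP.TO-derived timeout (ready_timeout_in_ms) expires. A rough stand-alone sketch of that polling pattern; read_csts(), the 1 ms sleep, and the flip-after-three-polls behaviour are placeholders, not the driver's MMIO accessors or timing:

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Stand-in for nvme_mmio_read_4(ctrlr, csts); bit 0 is CSTS.RDY. */
    static uint32_t
    read_csts(void)
    {
            static int calls;

            return ((++calls > 3) ? 1 : 0);  /* pretend RDY flips after ~3 polls */
    }

    /* Poll CSTS.RDY for desired_val, giving up after roughly timeout_ms. */
    static int
    wait_for_ready(int desired_val, int timeout_ms)
    {
            for (int waited = 0; waited < timeout_ms; waited++) {
                    if ((int)(read_csts() & 1) == desired_val)
                            return (0);
                    usleep(1000);            /* 1 ms between polls */
            }
            fprintf(stderr, "controller ready did not become %d within %d ms\n",
                desired_val, timeout_ms);
            return (-1);                     /* the driver returns ENXIO here */
    }

    int
    main(void)
    {
            return (wait_for_ready(1, 100) == 0 ? 0 : 1);
    }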
398 nvme_ctrlr_disable_qpairs(struct nvme_controller *ctrlr)
402 nvme_admin_qpair_disable(&ctrlr->adminq);
408 if (ctrlr->is_initialized) {
409 for (i = 0; i < ctrlr->num_io_queues; i++)
410 nvme_io_qpair_disable(&ctrlr->ioq[i]);
415 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
421 ctrlr->is_failed_admin = true;
422 nvme_ctrlr_disable_qpairs(ctrlr);
424 err = nvme_ctrlr_disable(ctrlr);
428 err = nvme_ctrlr_enable(ctrlr);
431 ctrlr->is_failed_admin = false;
438 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
442 cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
451 if (!ctrlr->is_dying)
452 taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
456 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
461 nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
465 nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
470 nvme_controller_data_swapbytes(&ctrlr->cdata);
476 if (ctrlr->cdata.mdts > 0)
477 ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
478 1 << (ctrlr->cdata.mdts + NVME_MPS_SHIFT +
479 NVME_CAP_HI_MPSMIN(ctrlr->cap_hi)));
485 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
491 nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
495 nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
512 ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
513 ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
514 if (ctrlr->num_io_queues > vm_ndomains)
515 ctrlr->num_io_queues -= ctrlr->num_io_queues % vm_ndomains;
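nvme_ctrlr_set_num_qpairs requests a queue count via Set Features, then clamps the driver's request to what the controller actually granted (submission and completion queues are granted separately) and trims the result to a multiple of the NUMA domain count so queues spread evenly. The clamp is just two min()s and a modulo; a tiny sketch with invented numbers:

    #include <stdio.h>

    #define MINV(a, b)      ((a) < (b) ? (a) : (b))

    int
    main(void)
    {
            /* Invented example: asked for 16, controller granted 12 SQs / 10 CQs. */
            int num_io_queues = 16, sq_allocated = 12, cq_allocated = 10;
            int ndomains = 4;

            num_io_queues = MINV(num_io_queues, sq_allocated);
            num_io_queues = MINV(num_io_queues, cq_allocated);
            if (num_io_queues > ndomains)
                    num_io_queues -= num_io_queues % ndomains;

            printf("using %d I/O queues\n", num_io_queues);  /* prints 8 */
            return (0);
    }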
521 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
527 for (i = 0; i < ctrlr->num_io_queues; i++) {
528 qpair = &ctrlr->ioq[i];
531 nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair,
535 nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
540 nvme_ctrlr_cmd_create_io_sq(ctrlr, qpair,
544 nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
553 nvme_ctrlr_delete_qpairs(struct nvme_controller *ctrlr)
558 for (int i = 0; i < ctrlr->num_io_queues; i++) {
559 qpair = &ctrlr->ioq[i];
562 nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
566 nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
571 nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
575 nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
584 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
589 for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
590 ns = &ctrlr->ns[i];
591 nvme_ns_construct(ns, i+1, ctrlr);
616 nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
624 (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
653 nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
658 nvme_printf(ctrlr, "SMART WARNING: available spare space below threshold\n");
661 nvme_printf(ctrlr, "SMART WARNING: temperature above threshold\n");
664 nvme_printf(ctrlr, "SMART WARNING: device reliability degraded\n");
667 nvme_printf(ctrlr, "SMART WARNING: media placed in read only mode\n");
670 nvme_printf(ctrlr, "SMART WARNING: volatile memory backup device failed\n");
673 nvme_printf(ctrlr, "SMART WARNING: persistent memory read only or unreliable\n");
676 nvme_printf(ctrlr, "SMART WARNING: unknown critical warning(s): state = 0x%02x\n",
679 nvme_ctrlr_devctl(ctrlr, "critical", "SMART_ERROR", "state=0x%02x", state);
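nvme_ctrlr_log_critical_warnings turns individual bits of the SMART/health critical-warning byte into console messages plus one devctl notification. A hedged user-space sketch of the same decode; the bit positions follow the NVMe SMART log page layout, and the CW_* names are local stand-ins rather than the driver's NVME_CRIT_WARN_ST_* constants:

    #include <stdint.h>
    #include <stdio.h>

    /* Critical-warning bits as laid out in the NVMe SMART/health log page. */
    #define CW_AVAILABLE_SPARE      0x01
    #define CW_TEMPERATURE          0x02
    #define CW_DEVICE_RELIABILITY   0x04
    #define CW_READ_ONLY            0x08
    #define CW_VOLATILE_BACKUP      0x10
    #define CW_PERSISTENT_MEMORY    0x20

    static void
    log_critical_warnings(uint8_t state)
    {
            if (state & CW_AVAILABLE_SPARE)
                    printf("SMART WARNING: available spare space below threshold\n");
            if (state & CW_TEMPERATURE)
                    printf("SMART WARNING: temperature above threshold\n");
            if (state & CW_DEVICE_RELIABILITY)
                    printf("SMART WARNING: device reliability degraded\n");
            if (state & CW_READ_ONLY)
                    printf("SMART WARNING: media placed in read only mode\n");
            if (state & CW_VOLATILE_BACKUP)
                    printf("SMART WARNING: volatile memory backup device failed\n");
            if (state & CW_PERSISTENT_MEMORY)
                    printf("SMART WARNING: persistent memory read only or unreliable\n");
            if (state & ~0x3f)
                    printf("SMART WARNING: unknown critical warning(s): state = 0x%02x\n",
                        state);
    }

    int
    main(void)
    {
            log_critical_warnings(0x03);    /* spare + temperature, for example */
            return (0);
    }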
697 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
704 for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
734 nvme_ctrlr_log_critical_warnings(aer->ctrlr,
743 aer->ctrlr->async_event_config &=
745 nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
746 aer->ctrlr->async_event_config, NULL, NULL);
753 nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
761 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
769 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
790 nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
796 aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
799 nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
805 nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
812 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
817 nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
822 aer->ctrlr = ctrlr;
838 nvme_ctrlr_submit_admin_request(ctrlr, req);
842 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
848 ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
852 if (ctrlr->cdata.ver >= NVME_REV(1, 2))
853 ctrlr->async_event_config |=
854 ctrlr->cdata.oaes & (NVME_ASYNC_EVENT_NS_ATTRIBUTE |
858 nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
864 nvme_printf(ctrlr, "temperature threshold not supported\n");
866 ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;
868 nvme_ctrlr_cmd_set_async_event_config(ctrlr,
869 ctrlr->async_event_config, NULL, NULL);
872 ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
874 for (i = 0; i < ctrlr->num_aers; i++) {
875 aer = &ctrlr->aer[i];
876 nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
881 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
884 ctrlr->int_coal_time = 0;
886 &ctrlr->int_coal_time);
888 ctrlr->int_coal_threshold = 0;
890 &ctrlr->int_coal_threshold);
892 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
893 ctrlr->int_coal_threshold, NULL, NULL);
897 nvme_ctrlr_hmb_free(struct nvme_controller *ctrlr)
902 if (ctrlr->hmb_desc_paddr) {
903 bus_dmamap_unload(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map);
904 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
905 ctrlr->hmb_desc_map);
906 ctrlr->hmb_desc_paddr = 0;
908 if (ctrlr->hmb_desc_tag) {
909 bus_dma_tag_destroy(ctrlr->hmb_desc_tag);
910 ctrlr->hmb_desc_tag = NULL;
912 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
913 hmbc = &ctrlr->hmb_chunks[i];
914 bus_dmamap_unload(ctrlr->hmb_tag, hmbc->hmbc_map);
915 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
918 ctrlr->hmb_nchunks = 0;
919 if (ctrlr->hmb_tag) {
920 bus_dma_tag_destroy(ctrlr->hmb_tag);
921 ctrlr->hmb_tag = NULL;
923 if (ctrlr->hmb_chunks) {
924 free(ctrlr->hmb_chunks, M_NVME);
925 ctrlr->hmb_chunks = NULL;
930 nvme_ctrlr_hmb_alloc(struct nvme_controller *ctrlr)
945 min = (long long unsigned)ctrlr->cdata.hmmin * NVME_HMB_UNITS;
948 pref = MIN((long long unsigned)ctrlr->cdata.hmpre * NVME_HMB_UNITS, max);
949 minc = MAX(ctrlr->cdata.hmminds * NVME_HMB_UNITS, ctrlr->page_size);
950 if (min > 0 && ctrlr->cdata.hmmaxd > 0)
951 minc = MAX(minc, min / ctrlr->cdata.hmmaxd);
952 ctrlr->hmb_chunk = pref;
957 * are all based on the current MPS (ctrlr->page_size).
959 ctrlr->hmb_chunk = roundup2(ctrlr->hmb_chunk, ctrlr->page_size);
960 ctrlr->hmb_nchunks = howmany(pref, ctrlr->hmb_chunk);
961 if (ctrlr->cdata.hmmaxd > 0 && ctrlr->hmb_nchunks > ctrlr->cdata.hmmaxd)
962 ctrlr->hmb_nchunks = ctrlr->cdata.hmmaxd;
963 ctrlr->hmb_chunks = malloc(sizeof(struct nvme_hmb_chunk) *
964 ctrlr->hmb_nchunks, M_NVME, M_WAITOK);
965 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
966 ctrlr->page_size, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
967 ctrlr->hmb_chunk, 1, ctrlr->hmb_chunk, 0, NULL, NULL, &ctrlr->hmb_tag);
969 nvme_printf(ctrlr, "HMB tag create failed %d\n", err);
970 nvme_ctrlr_hmb_free(ctrlr);
974 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
975 hmbc = &ctrlr->hmb_chunks[i];
976 if (bus_dmamem_alloc(ctrlr->hmb_tag,
979 nvme_printf(ctrlr, "failed to alloc HMB\n");
982 if (bus_dmamap_load(ctrlr->hmb_tag, hmbc->hmbc_map,
983 hmbc->hmbc_vaddr, ctrlr->hmb_chunk, nvme_single_map,
985 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
987 nvme_printf(ctrlr, "failed to load HMB\n");
990 bus_dmamap_sync(ctrlr->hmb_tag, hmbc->hmbc_map,
994 if (i < ctrlr->hmb_nchunks && i * ctrlr->hmb_chunk < min &&
995 ctrlr->hmb_chunk / 2 >= minc) {
996 ctrlr->hmb_nchunks = i;
997 nvme_ctrlr_hmb_free(ctrlr);
998 ctrlr->hmb_chunk /= 2;
1001 ctrlr->hmb_nchunks = i;
1002 if (ctrlr->hmb_nchunks * ctrlr->hmb_chunk < min) {
1003 nvme_ctrlr_hmb_free(ctrlr);
1007 size = sizeof(struct nvme_hmb_desc) * ctrlr->hmb_nchunks;
1008 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
1010 size, 1, size, 0, NULL, NULL, &ctrlr->hmb_desc_tag);
1012 nvme_printf(ctrlr, "HMB desc tag create failed %d\n", err);
1013 nvme_ctrlr_hmb_free(ctrlr);
1016 if (bus_dmamem_alloc(ctrlr->hmb_desc_tag,
1017 (void **)&ctrlr->hmb_desc_vaddr, BUS_DMA_WAITOK,
1018 &ctrlr->hmb_desc_map)) {
1019 nvme_printf(ctrlr, "failed to alloc HMB desc\n");
1020 nvme_ctrlr_hmb_free(ctrlr);
1023 if (bus_dmamap_load(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
1024 ctrlr->hmb_desc_vaddr, size, nvme_single_map,
1025 &ctrlr->hmb_desc_paddr, BUS_DMA_NOWAIT) != 0) {
1026 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
1027 ctrlr->hmb_desc_map);
1028 nvme_printf(ctrlr, "failed to load HMB desc\n");
1029 nvme_ctrlr_hmb_free(ctrlr);
1033 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
1034 memset(&ctrlr->hmb_desc_vaddr[i], 0,
1036 ctrlr->hmb_desc_vaddr[i].addr =
1037 htole64(ctrlr->hmb_chunks[i].hmbc_paddr);
1038 ctrlr->hmb_desc_vaddr[i].size = htole32(ctrlr->hmb_chunk / ctrlr->page_size);
1040 bus_dmamap_sync(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
1043 nvme_printf(ctrlr, "Allocated %lluMB host memory buffer\n",
1044 (long long unsigned)ctrlr->hmb_nchunks * ctrlr->hmb_chunk
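nvme_ctrlr_hmb_alloc sizes the host memory buffer from the controller's HMPRE/HMMIN/HMMINDS/HMMAXD hints and, when it cannot get enough physically contiguous chunks, halves the chunk size and retries until it either satisfies HMMIN or hits the minimum chunk size. A simplified sketch of just that sizing arithmetic; the 4 KiB unit, the example hints, the 16 MB cap, and the pretend 2 MB contiguity limit are all assumptions, and no DMA is involved:

    #include <stdint.h>
    #include <stdio.h>

    #define HMB_UNITS       4096ULL         /* HMPRE/HMMIN/HMMINDS are in 4 KiB units */
    #define MAXV(a, b)      ((a) > (b) ? (a) : (b))
    #define MINV(a, b)      ((a) < (b) ? (a) : (b))
    #define ROUNDUP(x, y)   (((x) + (y) - 1) / (y) * (y))
    #define HOWMANY(x, y)   (((x) + (y) - 1) / (y))

    int
    main(void)
    {
            /* Invented controller hints and host policy limit. */
            uint64_t hmpre = 8192, hmmin = 1024, hmminds = 0, hmmaxd = 8;
            uint64_t page_size = 4096, max = 16ULL << 20;   /* cap HMB at 16 MB */

            uint64_t min = hmmin * HMB_UNITS;
            uint64_t pref = MINV(hmpre * HMB_UNITS, max);
            uint64_t minc = MAXV(hmminds * HMB_UNITS, page_size);
            if (min > 0 && hmmaxd > 0)
                    minc = MAXV(minc, min / hmmaxd);

            uint64_t chunk = pref, nchunks;
            for (;;) {
                    chunk = ROUNDUP(chunk, page_size);
                    nchunks = HOWMANY(pref, chunk);
                    if (hmmaxd > 0 && nchunks > hmmaxd)
                            nchunks = hmmaxd;
                    /* Pretend contiguous allocations above 2 MB always fail. */
                    uint64_t got = (chunk <= (2ULL << 20)) ? nchunks : 0;
                    if (got < nchunks && got * chunk < min && chunk / 2 >= minc) {
                            chunk /= 2;     /* free what we got, retry smaller */
                            continue;
                    }
                    nchunks = got;
                    break;
            }
            if (nchunks * chunk < min)
                    printf("cannot satisfy HMMIN, giving up\n");
            else
                    printf("HMB: %llu chunks of %llu bytes (%llu MB)\n",
                        (unsigned long long)nchunks, (unsigned long long)chunk,
                        (unsigned long long)(nchunks * chunk >> 20));
            return (0);
    }

With these numbers the loop halves the chunk from 16 MB down to 2 MB and ends up with 8 chunks totalling 16 MB, mirroring how the driver then builds the descriptor list it hands to Set Features (host memory buffer).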
1049 nvme_ctrlr_hmb_enable(struct nvme_controller *ctrlr, bool enable, bool memret)
1060 nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_HOST_MEMORY_BUFFER, cdw11,
1061 ctrlr->hmb_nchunks * ctrlr->hmb_chunk / ctrlr->page_size,
1062 ctrlr->hmb_desc_paddr, ctrlr->hmb_desc_paddr >> 32,
1063 ctrlr->hmb_nchunks, NULL, 0,
1067 nvme_printf(ctrlr, "nvme_ctrlr_hmb_enable failed!\n");
1073 struct nvme_controller *ctrlr = ctrlr_arg;
1087 nvme_qpair_reset(&ctrlr->adminq);
1088 nvme_admin_qpair_enable(&ctrlr->adminq);
1091 if (ctrlr->ioq != NULL) {
1092 for (i = 0; i < ctrlr->num_io_queues; i++)
1093 nvme_qpair_reset(&ctrlr->ioq[i]);
1100 if (resetting && !ctrlr->is_initialized)
1103 if (resetting && nvme_ctrlr_identify(ctrlr) != 0) {
1104 nvme_ctrlr_fail(ctrlr, false);
1117 old_num_io_queues = ctrlr->num_io_queues;
1118 if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
1119 nvme_ctrlr_fail(ctrlr, false);
1123 if (old_num_io_queues != ctrlr->num_io_queues) {
1125 old_num_io_queues, ctrlr->num_io_queues);
1129 if (ctrlr->cdata.hmpre > 0 && ctrlr->hmb_nchunks == 0) {
1130 nvme_ctrlr_hmb_alloc(ctrlr);
1131 if (ctrlr->hmb_nchunks > 0)
1132 nvme_ctrlr_hmb_enable(ctrlr, true, false);
1133 } else if (ctrlr->hmb_nchunks > 0)
1134 nvme_ctrlr_hmb_enable(ctrlr, true, true);
1136 if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
1137 nvme_ctrlr_fail(ctrlr, false);
1141 if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
1142 nvme_ctrlr_fail(ctrlr, false);
1146 nvme_ctrlr_configure_aer(ctrlr);
1147 nvme_ctrlr_configure_int_coalescing(ctrlr);
1149 for (i = 0; i < ctrlr->num_io_queues; i++)
1150 nvme_io_qpair_enable(&ctrlr->ioq[i]);
1157 struct nvme_controller *ctrlr = arg;
1161 if (nvme_ctrlr_hw_reset(ctrlr) != 0 || ctrlr->fail_on_reset != 0) {
1162 nvme_ctrlr_fail(ctrlr, true);
1163 config_intrhook_disestablish(&ctrlr->config_hook);
1167 nvme_qpair_reset(&ctrlr->adminq);
1168 nvme_admin_qpair_enable(&ctrlr->adminq);
1170 if (nvme_ctrlr_identify(ctrlr) == 0 &&
1171 nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
1172 nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
1173 nvme_ctrlr_start(ctrlr, false);
1175 nvme_ctrlr_fail(ctrlr, false);
1177 nvme_sysctl_initialize_ctrlr(ctrlr);
1178 config_intrhook_disestablish(&ctrlr->config_hook);
1180 if (!ctrlr->is_failed) {
1181 ctrlr->is_initialized = true;
1182 nvme_notify_new_controller(ctrlr);
1190 struct nvme_controller *ctrlr = arg;
1193 nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"start\"");
1194 status = nvme_ctrlr_hw_reset(ctrlr);
1196 nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"success\"");
1197 nvme_ctrlr_start(ctrlr, true);
1199 nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"timed_out\"");
1200 nvme_ctrlr_fail(ctrlr, true);
1203 atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1210 nvme_ctrlr_poll(struct nvme_controller *ctrlr)
1214 nvme_qpair_process_completions(&ctrlr->adminq);
1216 for (i = 0; i < ctrlr->num_io_queues; i++)
1217 if (ctrlr->ioq && ctrlr->ioq[i].cpl)
1218 nvme_qpair_process_completions(&ctrlr->ioq[i]);
1229 struct nvme_controller *ctrlr = arg;
1231 nvme_mmio_write_4(ctrlr, intms, 1);
1232 nvme_ctrlr_poll(ctrlr);
1233 nvme_mmio_write_4(ctrlr, intmc, 1);
1257 nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
1267 if (pt->len > ctrlr->max_xfer_size) {
1268 nvme_printf(ctrlr, "pt->len (%d) "
1270 ctrlr->max_xfer_size);
1306 nvme_ctrlr_submit_admin_request(ctrlr, req);
1308 nvme_ctrlr_submit_io_request(ctrlr, req);
1340 nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
1355 if (npc->data_len > ctrlr->max_xfer_size) {
1356 nvme_printf(ctrlr,
1358 npc->data_len, ctrlr->max_xfer_size);
1399 nvme_ctrlr_submit_admin_request(ctrlr, req);
1401 nvme_ctrlr_submit_io_request(ctrlr, req);
1421 struct nvme_controller *ctrlr;
1424 ctrlr = cdev->si_drv1;
1429 nvme_ctrlr_reset(ctrlr);
1433 return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
1438 strlcpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
1444 *(uint64_t *)arg = ctrlr->max_xfer_size;
1455 return (nvme_ctrlr_linux_passthru_cmd(ctrlr, npc, npc->nsid, true,
1473 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
1481 ctrlr->dev = dev;
1483 mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
1484 if (bus_get_domain(dev, &ctrlr->domain) != 0)
1485 ctrlr->domain = 0;
1487 ctrlr->cap_lo = cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
1497 ctrlr->cap_hi = cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
1515 vs = nvme_mmio_read_4(ctrlr, vs);
1520 pmrcap = nvme_mmio_read_4(ctrlr, pmrcap);
1532 ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2;
1534 ctrlr->mps = NVME_CAP_HI_MPSMIN(cap_hi);
1535 ctrlr->page_size = 1 << (NVME_MPS_SHIFT + ctrlr->mps);
1539 ctrlr->ready_timeout_in_ms = to * 500;
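nvme_ctrlr_construct derives several parameters directly from the CAP register: the doorbell stride (DSTRD, stored as a shift of DSTRD + 2), the minimum memory page size (MPSMIN, which becomes ctrlr->page_size), and CAP.TO, which counts 500 ms units and becomes ready_timeout_in_ms. A small sketch of those conversions; the CAP value is fabricated, and the extraction macros below follow the NVMe register layout rather than the driver's NVME_CAP_LO/HI helpers:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative field extraction per the NVMe CAP register layout. */
    #define CAP_LO_TO(x)        (((x) >> 24) & 0xff)  /* timeout, in 500 ms units */
    #define CAP_HI_DSTRD(x)     ((x) & 0xf)           /* doorbell stride encoding */
    #define CAP_HI_MPSMIN(x)    (((x) >> 16) & 0xf)   /* min page size, 2^(12+n) */

    int
    main(void)
    {
            uint32_t cap_lo = (0x14 << 24) | 0x00ff;  /* fabricated: TO=20, MQES=255 */
            uint32_t cap_hi = 0;                      /* MPSMIN=0, DSTRD=0 */

            unsigned ready_timeout_in_ms = CAP_LO_TO(cap_lo) * 500;
            unsigned dstrd_shift = CAP_HI_DSTRD(cap_hi) + 2;  /* doorbells 4<<DSTRD apart */
            unsigned page_size = 1u << (12 + CAP_HI_MPSMIN(cap_hi));

            printf("ready timeout %u ms, doorbell stride %u bytes, page size %u\n",
                ready_timeout_in_ms, 1u << dstrd_shift, page_size);
            return (0);
    }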
1545 ctrlr->admin_timeout_period = timeout_period;
1551 ctrlr->timeout_period = timeout_period;
1556 ctrlr->enable_aborts = 0;
1557 TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
1559 ctrlr->alignment_splits = counter_u64_alloc(M_WAITOK);
1562 ctrlr->max_xfer_size = MIN(maxphys, (ctrlr->page_size / 8 * ctrlr->page_size));
1563 if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
1577 ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1578 taskqueue_thread_enqueue, &ctrlr->taskqueue);
1579 taskqueue_start_threads(&ctrlr->taskqueue, 2, PI_DISK, "nvme taskq");
1581 ctrlr->is_resetting = 0;
1582 ctrlr->is_initialized = false;
1583 ctrlr->notification_sent = 0;
1584 TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1585 STAILQ_INIT(&ctrlr->fail_req);
1586 ctrlr->is_failed = false;
1594 md_args.mda_si_drv1 = (void *)ctrlr;
1595 status = make_dev_s(&md_args, &ctrlr->cdev, "%s",
1610 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
1614 ctrlr->is_dying = true;
1616 if (ctrlr->resource == NULL)
1618 if (!mtx_initialized(&ctrlr->adminq.lock))
1625 gone = (nvme_mmio_read_4(ctrlr, csts) == NVME_GONE);
1627 nvme_ctrlr_fail(ctrlr, true);
1629 nvme_notify_fail_consumers(ctrlr);
1632 nvme_ns_destruct(&ctrlr->ns[i]);
1634 if (ctrlr->cdev)
1635 destroy_dev(ctrlr->cdev);
1637 if (ctrlr->is_initialized) {
1639 if (ctrlr->hmb_nchunks > 0)
1640 nvme_ctrlr_hmb_enable(ctrlr, false, false);
1641 nvme_ctrlr_delete_qpairs(ctrlr);
1643 nvme_ctrlr_hmb_free(ctrlr);
1645 if (ctrlr->ioq != NULL) {
1646 for (i = 0; i < ctrlr->num_io_queues; i++)
1647 nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1648 free(ctrlr->ioq, M_NVME);
1650 nvme_admin_qpair_destroy(&ctrlr->adminq);
1660 nvme_ctrlr_shutdown(ctrlr);
1663 nvme_ctrlr_disable(ctrlr);
1666 if (ctrlr->taskqueue)
1667 taskqueue_free(ctrlr->taskqueue);
1669 if (ctrlr->tag)
1670 bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1672 if (ctrlr->res)
1673 bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1674 rman_get_rid(ctrlr->res), ctrlr->res);
1676 if (ctrlr->bar4_resource != NULL) {
1678 ctrlr->bar4_resource_id, ctrlr->bar4_resource);
1682 ctrlr->resource_id, ctrlr->resource);
1685 if (ctrlr->alignment_splits)
1686 counter_u64_free(ctrlr->alignment_splits);
1688 mtx_destroy(&ctrlr->lock);
1692 nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
1698 cc = nvme_mmio_read_4(ctrlr, cc);
1701 nvme_mmio_write_4(ctrlr, cc, cc);
1703 timeout = ticks + (ctrlr->cdata.rtd3e == 0 ? 5 * hz :
1704 ((uint64_t)ctrlr->cdata.rtd3e * hz + 999999) / 1000000);
1706 csts = nvme_mmio_read_4(ctrlr, csts);
1712 nvme_printf(ctrlr, "shutdown timeout\n");
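nvme_ctrlr_shutdown converts the controller's RTD3E value (reported in microseconds) into a tick deadline for the shutdown-complete poll on CSTS, falling back to 5 seconds when RTD3E is zero. The conversion itself is a rounded-up division; a short sketch where hz and the example RTD3E are stand-ins for the kernel's tick rate and real identify data:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            int hz = 1000;                  /* assumed tick rate */
            uint32_t rtd3e = 8000000;       /* e.g. controller reports 8 s, in us */

            /* 5 s default when RTD3E is 0; otherwise round microseconds up to ticks. */
            uint64_t timeout_ticks = (rtd3e == 0) ? 5ULL * hz :
                ((uint64_t)rtd3e * hz + 999999) / 1000000;

            printf("shutdown deadline: %llu ticks after CC requests shutdown\n",
                (unsigned long long)timeout_ticks);
            return (0);
    }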
1720 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
1724 nvme_qpair_submit_request(&ctrlr->adminq, req);
1728 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
1733 qpair = &ctrlr->ioq[QP(ctrlr, curcpu)];
1738 nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
1741 return (ctrlr->dev);
1745 nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
1748 return (&ctrlr->cdata);
1752 nvme_ctrlr_suspend(struct nvme_controller *ctrlr)
1761 if (ctrlr->is_failed)
1771 while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0)
1774 nvme_printf(ctrlr,
1779 if (ctrlr->hmb_nchunks > 0)
1780 nvme_ctrlr_hmb_enable(ctrlr, false, false);
1790 nvme_ctrlr_delete_qpairs(ctrlr);
1791 nvme_ctrlr_disable_qpairs(ctrlr);
1792 nvme_ctrlr_shutdown(ctrlr);
1798 nvme_ctrlr_resume(struct nvme_controller *ctrlr)
1804 if (ctrlr->is_failed)
1807 if (nvme_ctrlr_hw_reset(ctrlr) != 0)
1816 nvme_ctrlr_start(ctrlr, true);
1817 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1826 nvme_printf(ctrlr, "Failed to reset on resume, failing.\n");
1827 nvme_ctrlr_fail(ctrlr, true);
1828 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);