Lines Matching defs:psc
432 int qwx_mhi_send_cmd(struct qwx_pci_softc *psc, uint32_t, uint32_t);
637 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
641 if (psc->sc_msi_cap & PCI_MSI_MC_C64) {
645 addr_lo = pci_conf_read(psc->sc_pc, psc->sc_tag,
646 psc->sc_msi_off + PCI_MSI_MA);
647 addr_hi = pci_conf_read(psc->sc_pc, psc->sc_tag,
648 psc->sc_msi_off + PCI_MSI_MAU32);
650 data = pci_conf_read(psc->sc_pc, psc->sc_tag,
651 psc->sc_msi_off + PCI_MSI_MD64);
653 addr = pci_conf_read(psc->sc_pc, psc->sc_tag,
654 psc->sc_msi_off + PCI_MSI_MA);
655 data = pci_conf_read(psc->sc_pc, psc->sc_tag,
656 psc->sc_msi_off + PCI_MSI_MD32);
741 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)self;
742 struct qwx_softc *sc = &psc->sc_sc;
754 psc->sc_pc = pa->pa_pc;
755 psc->sc_tag = pa->pa_tag;
763 sreg = pci_conf_read(psc->sc_pc, psc->sc_tag, PCI_SUBSYS_ID_REG);
784 if (pci_get_capability(psc->sc_pc, psc->sc_tag, PCI_CAP_PCIEXPRESS,
785 &psc->sc_cap_off, NULL) == 0) {
790 if (pci_get_capability(psc->sc_pc, psc->sc_tag, PCI_CAP_MSI,
791 &psc->sc_msi_off, &psc->sc_msi_cap) == 0) {
802 &psc->sc_st, &psc->sc_sh, &psc->sc_map, &psc->sc_mapsize, 0)) {
807 sc->mem = psc->sc_map;
824 psc->mhi_irq[MHI_ER_CTRL] = 1;
825 psc->mhi_irq[MHI_ER_DATA] = 2;
828 intrstr = pci_intr_string(psc->sc_pc, ih);
829 snprintf(psc->sc_ivname[0], sizeof(psc->sc_ivname[0]), "%s:bhi",
831 psc->sc_ih[0] = pci_intr_establish(psc->sc_pc, ih, IPL_NET,
832 qwx_pci_intr, psc, psc->sc_ivname[0]);
833 if (psc->sc_ih[0] == NULL) {
845 msivec = psc->mhi_irq[MHI_ER_CTRL];
851 snprintf(psc->sc_ivname[msivec],
852 sizeof(psc->sc_ivname[msivec]),
854 psc->sc_ih[msivec] = pci_intr_establish(psc->sc_pc, ih,
855 IPL_NET, qwx_pci_intr_mhi_ctrl, psc,
856 psc->sc_ivname[msivec]);
857 if (psc->sc_ih[msivec] == NULL) {
863 msivec = psc->mhi_irq[MHI_ER_DATA];
869 snprintf(psc->sc_ivname[msivec],
870 sizeof(psc->sc_ivname[msivec]),
872 psc->sc_ih[msivec] = pci_intr_establish(psc->sc_pc, ih,
873 IPL_NET, qwx_pci_intr_mhi_data, psc,
874 psc->sc_ivname[msivec]);
875 if (psc->sc_ih[msivec] == NULL) {
885 psc->sc_pci_ops = &qwx_pci_ops_qca6390;
901 psc->max_chan = QWX_MHI_CONFIG_QCA6390_MAX_CHANNELS;
904 psc->sc_pci_ops = &qwx_pci_ops_qcn9074;
906 psc->max_chan = QWX_MHI_CONFIG_QCA9074_MAX_CHANNELS;
934 psc->max_chan = QWX_MHI_CONFIG_QCA6390_MAX_CHANNELS;
956 psc->chan_ctxt = qwx_dmamem_alloc(sc->sc_dmat,
957 sizeof(struct qwx_mhi_chan_ctxt) * psc->max_chan, 0);
958 if (psc->chan_ctxt == NULL) {
964 if (psc->sc_pci_ops->alloc_xfer_rings(psc)) {
970 psc->event_ctxt = qwx_dmamem_alloc(sc->sc_dmat,
972 if (psc->event_ctxt == NULL) {
978 if (qwx_pci_alloc_event_rings(psc)) {
984 psc->cmd_ctxt = qwx_dmamem_alloc(sc->sc_dmat,
986 if (psc->cmd_ctxt == NULL) {
992 if (qwx_pci_init_cmd_ring(sc, &psc->cmd_ring)) {
1045 task_set(&psc->rddm_task, qwx_rddm_task, psc);
1110 qwx_pci_free_cmd_ring(psc);
1112 qwx_dmamem_free(sc->sc_dmat, psc->cmd_ctxt);
1113 psc->cmd_ctxt = NULL;
1115 qwx_pci_free_event_rings(psc);
1117 qwx_dmamem_free(sc->sc_dmat, psc->event_ctxt);
1118 psc->event_ctxt = NULL;
1120 qwx_pci_free_xfer_rings(psc);
1122 qwx_dmamem_free(sc->sc_dmat, psc->chan_ctxt);
1123 psc->chan_ctxt = NULL;
1126 pci_intr_disestablish(psc->sc_pc, psc->sc_ih[0]);
1133 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)self;
1134 struct qwx_softc *sc = &psc->sc_sc;
1136 if (psc->sc_ih[0]) {
1137 pci_intr_disestablish(psc->sc_pc, psc->sc_ih[0]);
1138 psc->sc_ih[0] = NULL;
1143 qwx_pci_free_event_rings(psc);
1144 qwx_pci_free_xfer_rings(psc);
1145 qwx_pci_free_cmd_ring(psc);
1147 if (psc->event_ctxt) {
1148 qwx_dmamem_free(sc->sc_dmat, psc->event_ctxt);
1149 psc->event_ctxt = NULL;
1151 if (psc->chan_ctxt) {
1152 qwx_dmamem_free(sc->sc_dmat, psc->chan_ctxt);
1153 psc->chan_ctxt = NULL;
1155 if (psc->cmd_ctxt) {
1156 qwx_dmamem_free(sc->sc_dmat, psc->cmd_ctxt);
1157 psc->cmd_ctxt = NULL;
1160 if (psc->amss_data) {
1161 qwx_dmamem_free(sc->sc_dmat, psc->amss_data);
1162 psc->amss_data = NULL;
1164 if (psc->amss_vec) {
1165 qwx_dmamem_free(sc->sc_dmat, psc->amss_vec);
1166 psc->amss_vec = NULL;
1184 qwx_pci_free_xfer_rings(struct qwx_pci_softc *psc)
1186 struct qwx_softc *sc = &psc->sc_sc;
1189 for (i = 0; i < nitems(psc->xfer_rings); i++) {
1190 struct qwx_pci_xfer_ring *ring = &psc->xfer_rings[i];
1289 qwx_pci_alloc_xfer_rings_qca6390(struct qwx_pci_softc *psc)
1291 struct qwx_softc *sc = &psc->sc_sc;
1295 &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_OUTBOUND],
1301 &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_INBOUND],
1308 qwx_pci_free_xfer_rings(psc);
1313 qwx_pci_alloc_xfer_rings_qcn9074(struct qwx_pci_softc *psc)
1315 struct qwx_softc *sc = &psc->sc_sc;
1319 &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_OUTBOUND],
1325 &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_INBOUND],
1332 qwx_pci_free_xfer_rings(psc);
1337 qwx_pci_free_event_rings(struct qwx_pci_softc *psc)
1339 struct qwx_softc *sc = &psc->sc_sc;
1342 for (i = 0; i < nitems(psc->event_rings); i++) {
1343 struct qwx_pci_event_ring *ring = &psc->event_rings[i];
1375 qwx_pci_alloc_event_rings(struct qwx_pci_softc *psc)
1377 struct qwx_softc *sc = &psc->sc_sc;
1380 ret = qwx_pci_alloc_event_ring(sc, &psc->event_rings[0],
1381 MHI_ER_CTRL, psc->mhi_irq[MHI_ER_CTRL], 0, 32);
1385 ret = qwx_pci_alloc_event_ring(sc, &psc->event_rings[1],
1386 MHI_ER_DATA, psc->mhi_irq[MHI_ER_DATA], 1, 256);
1392 qwx_pci_free_event_rings(psc);
1397 qwx_pci_free_cmd_ring(struct qwx_pci_softc *psc)
1399 struct qwx_softc *sc = &psc->sc_sc;
1400 struct qwx_pci_cmd_ring *ring = &psc->cmd_ring;
1427 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
1429 return (bus_space_read_4(psc->sc_st, psc->sc_sh, addr));
1435 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
1437 bus_space_write_4(psc->sc_st, psc->sc_sh, addr, val);
1456 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
1466 if (wakeup_required && psc->sc_pci_ops->wakeup)
1467 ret = psc->sc_pci_ops->wakeup(sc);
1472 val = psc->sc_pci_ops->window_read32(sc, offset);
1474 if (wakeup_required && !ret && psc->sc_pci_ops->release)
1475 psc->sc_pci_ops->release(sc);
1483 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
1492 if (wakeup_required && psc->sc_pci_ops->wakeup)
1493 ret = psc->sc_pci_ops->wakeup(sc);
1498 psc->sc_pci_ops->window_write32(sc, offset, value);
1500 if (wakeup_required && !ret && psc->sc_pci_ops->release)
1501 psc->sc_pci_ops->release(sc);
1573 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
1622 snprintf(psc->sc_ivname[irq_idx], sizeof(psc->sc_ivname[0]),
1624 psc->sc_ih[irq_idx] = pci_intr_establish(psc->sc_pc, ih,
1625 IPL_NET, qwx_ext_intr, irq_grp, psc->sc_ivname[irq_idx]);
1626 if (psc->sc_ih[irq_idx] == NULL) {
1642 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
1673 snprintf(psc->sc_ivname[irq_idx], sizeof(psc->sc_ivname[0]),
1675 psc->sc_ih[irq_idx] = pci_intr_establish(psc->sc_pc, ih,
1676 IPL_NET, qwx_ce_intr, ce_pipe, psc->sc_ivname[irq_idx]);
1677 if (psc->sc_ih[irq_idx] == NULL) {
1796 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
1803 if (window != psc->register_window) {
1807 psc->register_window = window;
2083 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2086 val = pci_conf_read(psc->sc_pc, psc->sc_tag,
2087 psc->sc_msi_off + PCI_MSI_MC);
2094 pci_conf_write(psc->sc_pc, psc->sc_tag, psc->sc_msi_off + PCI_MSI_MC,
2113 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2115 psc->sc_lcsr = pci_conf_read(psc->sc_pc, psc->sc_tag,
2116 psc->sc_cap_off + PCI_PCIE_LCSR);
2119 (uint16_t)psc->sc_lcsr, (psc->sc_lcsr & PCI_PCIE_LCSR_ASPM_L0S),
2120 (psc->sc_lcsr & PCI_PCIE_LCSR_ASPM_L1));
2123 pci_conf_write(psc->sc_pc, psc->sc_tag, psc->sc_cap_off + PCI_PCIE_LCSR,
2124 psc->sc_lcsr & ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1));
2126 psc->sc_flags |= ATH11K_PCI_ASPM_RESTORE;
2132 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2134 if (psc->sc_flags & ATH11K_PCI_ASPM_RESTORE) {
2135 pci_conf_write(psc->sc_pc, psc->sc_tag,
2136 psc->sc_cap_off + PCI_PCIE_LCSR, psc->sc_lcsr);
2137 psc->sc_flags &= ~ATH11K_PCI_ASPM_RESTORE;
2144 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2147 psc->register_window = 0;
2159 error = qwx_mhi_start(psc);
2387 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2393 qwx_mhi_ring_doorbell(sc, psc->wake_db, 1);
2399 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2401 qwx_mhi_ring_doorbell(sc, psc->wake_db, 0);
2407 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2409 return (psc->mhi_state == MHI_STATE_M0); /* TODO other states? */
2413 qwx_mhi_init_xfer_rings(struct qwx_pci_softc *psc)
2415 struct qwx_softc *sc = &psc->sc_sc;
2421 cbase = (struct qwx_mhi_chan_ctxt *)QWX_DMA_KVA(psc->chan_ctxt);
2422 for (i = 0; i < psc->max_chan; i++) {
2436 for (i = 0; i < nitems(psc->xfer_rings); i++) {
2437 ring = &psc->xfer_rings[i];
2438 KASSERT(ring->mhi_chan_id < psc->max_chan);
2445 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->chan_ctxt), 0,
2446 QWX_DMA_LEN(psc->chan_ctxt), BUS_DMASYNC_PREWRITE);
2450 qwx_mhi_init_event_rings(struct qwx_pci_softc *psc)
2452 struct qwx_softc *sc = &psc->sc_sc;
2459 c = (struct qwx_mhi_event_ctxt *)QWX_DMA_KVA(psc->event_ctxt);
2460 for (i = 0; i < nitems(psc->event_rings); i++, c++) {
2461 ring = &psc->event_rings[i];
2486 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->event_ctxt), 0,
2487 QWX_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_PREWRITE);
2491 qwx_mhi_init_cmd_ring(struct qwx_pci_softc *psc)
2493 struct qwx_softc *sc = &psc->sc_sc;
2494 struct qwx_pci_cmd_ring *ring = &psc->cmd_ring;
2503 c = (struct qwx_mhi_cmd_ctxt *)QWX_DMA_KVA(psc->cmd_ctxt);
2509 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->cmd_ctxt), 0,
2510 QWX_DMA_LEN(psc->cmd_ctxt), BUS_DMASYNC_PREWRITE);
2514 qwx_mhi_init_dev_ctxt(struct qwx_pci_softc *psc)
2516 qwx_mhi_init_xfer_rings(psc);
2517 qwx_mhi_init_event_rings(psc);
2518 qwx_mhi_init_cmd_ring(psc);
2537 qwx_mhi_cmd_ring_submit(struct qwx_pci_softc *psc,
2540 struct qwx_softc *sc = &psc->sc_sc;
2552 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->cmd_ctxt), 0,
2553 QWX_DMA_LEN(psc->cmd_ctxt), BUS_DMASYNC_POSTREAD);
2555 c = (struct qwx_mhi_cmd_ctxt *)QWX_DMA_KVA(psc->cmd_ctxt);
2558 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->cmd_ctxt), 0,
2559 QWX_DMA_LEN(psc->cmd_ctxt), BUS_DMASYNC_PREWRITE);
2567 qwx_mhi_send_cmd(struct qwx_pci_softc *psc, uint32_t cmd, uint32_t chan)
2569 struct qwx_softc *sc = &psc->sc_sc;
2570 struct qwx_pci_cmd_ring *ring = &psc->cmd_ring;
2588 return qwx_mhi_cmd_ring_submit(psc, ring);
2625 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2632 ring = &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_OUTBOUND];
2696 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->chan_ctxt), 0,
2697 QWX_DMA_LEN(psc->chan_ctxt), BUS_DMASYNC_PREWRITE);
2704 qwx_mhi_start_channel(struct qwx_pci_softc *psc,
2707 struct qwx_softc *sc = &psc->sc_sc;
2731 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->chan_ctxt), 0,
2732 QWX_DMA_LEN(psc->chan_ctxt), BUS_DMASYNC_PREWRITE);
2735 if (qwx_mhi_send_cmd(psc, MHI_CMD_START_CHAN, ring->mhi_chan_id))
2788 qwx_mhi_start_channels(struct qwx_pci_softc *psc)
2793 qwx_mhi_device_wake(&psc->sc_sc);
2795 ring = &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_OUTBOUND];
2796 if (qwx_mhi_start_channel(psc, ring)) {
2801 ring = &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_INBOUND];
2802 if (qwx_mhi_start_channel(psc, ring))
2805 qwx_mhi_device_zzz(&psc->sc_sc);
2810 qwx_mhi_start(struct qwx_pci_softc *psc)
2812 struct qwx_softc *sc = &psc->sc_sc;
2817 qwx_mhi_init_dev_ctxt(psc);
2819 psc->bhi_off = qwx_pci_read(sc, MHI_BHI_OFFSET);
2820 DNPRINTF(QWX_D_MHI, "%s: BHI offset 0x%x\n", __func__, psc->bhi_off);
2822 psc->bhie_off = qwx_pci_read(sc, MHI_BHIE_OFFSET);
2823 DNPRINTF(QWX_D_MHI, "%s: BHIE offset 0x%x\n", __func__, psc->bhie_off);
2829 qwx_pci_write(sc, psc->bhie_off + off, 0x0);
2831 qwx_rddm_prepare(psc);
2834 qwx_pci_write(sc, psc->bhi_off + MHI_BHI_INTVEC, 0x00);
2840 ee = qwx_pci_read(sc, psc->bhi_off + MHI_BHI_EXECENV);
2870 psc->bhi_ee = ee;
2871 psc->mhi_state = state;
2880 ret = qwx_mhi_fw_load_handler(psc);
2886 qwx_mhi_start_channels(psc);
2889 ee = qwx_pci_read(sc, psc->bhi_off + MHI_BHI_EXECENV);
2909 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2930 if (psc->bhi_off == 0)
2931 psc->bhi_off = qwx_pci_read(sc, MHI_BHI_OFFSET);
2934 qwx_pci_write(sc, psc->bhi_off + MHI_BHI_INTVEC, 0x00);
2974 qwx_mhi_fw_load_handler(struct qwx_pci_softc *psc)
2976 struct qwx_softc *sc = &psc->sc_sc;
3010 ret = qwx_mhi_fw_load_bhi(psc, data, MHI_DMA_VEC_CHUNK_SIZE);
3018 ret = qwx_mhi_fw_load_bhie(psc, data, len);
3025 while (psc->bhi_ee < MHI_EE_AMSS) {
3026 ret = tsleep_nsec(&psc->bhi_ee, 0, "qwxamss",
3094 qwx_mhi_ready_state_transition(struct qwx_pci_softc *psc)
3096 struct qwx_softc *sc = &psc->sc_sc;
3108 qwx_mhi_init_mmio(psc);
3111 for (i = 0; i < nitems(psc->event_rings); i++) {
3112 struct qwx_pci_event_ring *ring = &psc->event_rings[i];
3124 qwx_mhi_mission_mode_state_transition(struct qwx_pci_softc *psc)
3126 struct qwx_softc *sc = &psc->sc_sc;
3132 for (i = 0; i < nitems(psc->event_rings); i++) {
3133 struct qwx_pci_event_ring *ring = &psc->event_rings[i];
3143 qwx_mhi_low_power_mode_state_transition(struct qwx_pci_softc *psc)
3145 struct qwx_softc *sc = &psc->sc_sc;
3167 qwx_mhi_init_mmio(struct qwx_pci_softc *psc)
3169 struct qwx_softc *sc = &psc->sc_sc;
3177 psc->wake_db = reg + 8 * MHI_DEV_WAKE_DB;
3180 for (i = 0; i < nitems(psc->xfer_rings); i++) {
3181 struct qwx_pci_xfer_ring *ring = &psc->xfer_rings[i];
3187 for (i = 0; i < nitems(psc->event_rings); i++) {
3188 struct qwx_pci_event_ring *ring = &psc->event_rings[i];
3192 paddr = QWX_DMA_DVA(psc->chan_ctxt);
3196 paddr = QWX_DMA_DVA(psc->event_ctxt);
3200 paddr = QWX_DMA_DVA(psc->cmd_ctxt);
3221 qwx_mhi_fw_load_bhi(struct qwx_pci_softc *psc, uint8_t *data, size_t len)
3223 struct qwx_softc *sc = &psc->sc_sc;
3239 qwx_pci_write(sc, psc->bhi_off + MHI_BHI_STATUS, 0);
3243 qwx_pci_write(sc, psc->bhi_off + MHI_BHI_IMGADDR_HIGH, paddr >> 32);
3244 qwx_pci_write(sc, psc->bhi_off + MHI_BHI_IMGADDR_LOW,
3246 qwx_pci_write(sc, psc->bhi_off + MHI_BHI_IMGSIZE, len);
3252 qwx_pci_write(sc, psc->bhi_off + MHI_BHI_IMGTXDB, seq);
3256 while (status != MHI_BHI_STATUS_SUCCESS && psc->bhi_ee < MHI_EE_SBL) {
3257 ret = tsleep_nsec(&psc->bhi_ee, 0, "qwxbhi", SEC_TO_NSEC(5));
3260 reg = qwx_pci_read(sc, psc->bhi_off + MHI_BHI_STATUS);
3266 reg = qwx_pci_read(sc, psc->bhi_off + MHI_BHI_STATUS);
3269 __func__, status, psc->bhi_ee);
3277 qwx_mhi_fw_load_bhie(struct qwx_pci_softc *psc, uint8_t *data, size_t len)
3279 struct qwx_softc *sc = &psc->sc_sc;
3294 if (psc->amss_data == NULL || QWX_DMA_LEN(psc->amss_data) < len) {
3295 if (psc->amss_data)
3296 qwx_dmamem_free(sc->sc_dmat, psc->amss_data);
3297 psc->amss_data = qwx_dmamem_alloc(sc->sc_dmat, len, 0);
3298 if (psc->amss_data == NULL) {
3306 if (psc->amss_vec == NULL || QWX_DMA_LEN(psc->amss_vec) < vec_size) {
3307 if (psc->amss_vec)
3308 qwx_dmamem_free(sc->sc_dmat, psc->amss_vec);
3309 psc->amss_vec = qwx_dmamem_alloc(sc->sc_dmat, vec_size, 0);
3310 if (psc->amss_vec == NULL) {
3313 qwx_dmamem_free(sc->sc_dmat, psc->amss_data);
3314 psc->amss_data = NULL;
3320 memcpy(QWX_DMA_KVA(psc->amss_data), data, len);
3323 paddr = QWX_DMA_DVA(psc->amss_data);
3324 vec = QWX_DMA_KVA(psc->amss_vec);
3337 paddr = QWX_DMA_DVA(psc->amss_vec);
3338 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECADDR_HIGH_OFFS,
3340 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECADDR_LOW_OFFS,
3342 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECSIZE_OFFS, vec_size);
3348 reg = qwx_pci_read(sc, psc->bhie_off + MHI_BHIE_TXVECDB_OFFS);
3351 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECDB_OFFS, reg);
3356 ret = tsleep_nsec(&psc->bhie_off, 0, "qwxbhie",
3361 psc->bhie_off + MHI_BHIE_TXVECSTATUS_OFFS);
3376 qwx_rddm_prepare(struct qwx_pci_softc *psc)
3378 struct qwx_softc *sc = &psc->sc_sc;
3427 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECADDR_HIGH_OFFS,
3429 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECADDR_LOW_OFFS,
3431 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECSIZE_OFFS, vec_size);
3438 reg = qwx_pci_read(sc, psc->bhie_off + MHI_BHIE_RXVECDB_OFFS);
3441 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECDB_OFFS, reg);
3443 psc->rddm_data = data_adm;
3444 psc->rddm_vec = vec_adm;
3451 struct qwx_pci_softc *psc = arg;
3452 struct qwx_softc *sc = &psc->sc_sc;
3465 if (psc->rddm_data == NULL) {
3474 psc->bhie_off + MHI_BHIE_RXVECSTATUS_OFFS);
3487 rddm = QWX_DMA_KVA(psc->rddm_data);
3495 tsleep_nsec(&psc->rddm_data, 0, "qwxrddm", SEC_TO_NSEC(30));
3536 qwx_dmamem_free(sc->sc_dmat, psc->rddm_data);
3537 qwx_dmamem_free(sc->sc_dmat, psc->rddm_vec);
3538 psc->rddm_data = NULL;
3539 psc->rddm_vec = NULL;
3561 qwx_mhi_state_change(struct qwx_pci_softc *psc, int ee, int mhi_state)
3563 struct qwx_softc *sc = &psc->sc_sc;
3564 uint32_t old_ee = psc->bhi_ee;
3565 uint32_t old_mhi_state = psc->mhi_state;
3567 if (ee != -1 && psc->bhi_ee != ee) {
3572 psc->bhi_ee = ee;
3575 psc->bhi_ee = ee;
3582 psc->bhi_ee = ee;
3584 wakeup(&psc->bhie_off);
3589 psc->bhi_ee = ee;
3598 if (mhi_state != -1 && psc->mhi_state != mhi_state) {
3605 psc->mhi_state = mhi_state;
3610 psc->mhi_state = mhi_state;
3611 qwx_mhi_ready_state_transition(psc);
3616 psc->mhi_state = mhi_state;
3617 qwx_mhi_mission_mode_state_transition(psc);
3622 psc->mhi_state = mhi_state;
3623 qwx_mhi_low_power_mode_state_transition(psc);
3629 psc->mhi_state = mhi_state;
3638 if (old_ee != psc->bhi_ee)
3639 wakeup(&psc->bhi_ee);
3640 if (old_mhi_state != psc->mhi_state)
3641 wakeup(&psc->mhi_state);
3645 qwx_pci_intr_ctrl_event_mhi(struct qwx_pci_softc *psc, uint32_t mhi_state)
3648 psc->mhi_state, mhi_state);
3650 if (psc->mhi_state != mhi_state)
3651 qwx_mhi_state_change(psc, -1, mhi_state);
3655 qwx_pci_intr_ctrl_event_ee(struct qwx_pci_softc *psc, uint32_t ee)
3658 psc->bhi_ee, ee);
3660 if (psc->bhi_ee != ee)
3661 qwx_mhi_state_change(psc, ee, -1);
3665 qwx_pci_intr_ctrl_event_cmd_complete(struct qwx_pci_softc *psc,
3668 struct qwx_pci_cmd_ring *cmd_ring = &psc->cmd_ring;
3682 for (i = 0; i < nitems(psc->xfer_rings); i++) {
3683 if (psc->xfer_rings[i].mhi_chan_id == chid) {
3684 xfer_ring = &psc->xfer_rings[i];
3704 qwx_pci_intr_ctrl_event(struct qwx_pci_softc *psc, struct qwx_pci_event_ring *ring)
3706 struct qwx_softc *sc = &psc->sc_sc;
3721 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->event_ctxt), 0,
3722 QWX_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_POSTREAD);
3758 qwx_pci_intr_ctrl_event_mhi(psc, code);
3761 qwx_pci_intr_ctrl_event_ee(psc, code);
3764 qwx_pci_intr_ctrl_event_cmd_complete(psc,
3786 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->event_ctxt), 0,
3787 QWX_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_PREWRITE);
3794 qwx_pci_intr_data_event_tx(struct qwx_pci_softc *psc, struct qwx_mhi_ring_element *e)
3796 struct qwx_softc *sc = &psc->sc_sc;
3812 for (i = 0; i < nitems(psc->xfer_rings); i++) {
3813 ring = &psc->xfer_rings[i];
3817 if (i == nitems(psc->xfer_rings)) {
3913 QWX_DMA_MAP(psc->chan_ctxt), 0,
3914 QWX_DMA_LEN(psc->chan_ctxt),
3927 qwx_pci_intr_data_event(struct qwx_pci_softc *psc, struct qwx_pci_event_ring *ring)
3929 struct qwx_softc *sc = &psc->sc_sc;
3944 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->event_ctxt), 0,
3945 QWX_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_POSTREAD);
3979 qwx_pci_intr_data_event_tx(psc, e);
4000 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->event_ctxt), 0,
4001 QWX_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_PREWRITE);
4010 struct qwx_pci_softc *psc = arg;
4012 if (qwx_pci_intr_ctrl_event(psc, &psc->event_rings[0]))
4021 struct qwx_pci_softc *psc = arg;
4023 if (qwx_pci_intr_data_event(psc, &psc->event_rings[1]))
4032 struct qwx_pci_softc *psc = arg;
4033 struct qwx_softc *sc = (void *)psc;
4041 if (psc->bhi_off == 0)
4042 psc->bhi_off = qwx_pci_read(sc, MHI_BHI_OFFSET);
4044 ee = qwx_pci_read(sc, psc->bhi_off + MHI_BHI_EXECENV);
4051 sc->sc_dev.dv_xname, psc->bhi_ee, ee, psc->mhi_state, state);
4055 psc->bhi_ee = ee;
4057 if (!psc->rddm_triggered) {
4059 task_add(systq, &psc->rddm_task);
4060 psc->rddm_triggered = 1;
4074 } else if (psc->bhi_ee == MHI_EE_PBL || psc->bhi_ee == MHI_EE_SBL) {
4077 if (psc->bhi_ee != ee)
4080 if (psc->mhi_state != state)
4084 qwx_mhi_state_change(psc, new_ee, new_mhi_state);
4092 if (qwx_pci_intr_ctrl_event(psc, &psc->event_rings[0]))
4094 if (qwx_pci_intr_data_event(psc, &psc->event_rings[1]))
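
Nearly every match above depends on a single containment idiom: struct qwx_pci_softc embeds the bus-independent struct qwx_softc as its first member (sc_sc), which is why the driver can cast a generic softc pointer to the PCI-specific one (lines 637, 741, 1133, ...) and recover the generic view with &psc->sc_sc (lines 742, 1134, ...). The following is a minimal standalone sketch of that idiom, not the driver source; every field other than sc_sc is a hypothetical placeholder.

    /*
     * Sketch of the softc containment idiom used throughout the
     * matches above. Placeholder fields only; not the real structs.
     */
    #include <stddef.h>
    #include <assert.h>

    struct qwx_softc {
        int sc_dummy;               /* stand-in for bus-independent state */
    };

    struct qwx_pci_softc {
        struct qwx_softc sc_sc;     /* must be the first member */
        int sc_pci_dummy;           /* stand-in for PCI-specific state */
    };

    static void
    example(struct qwx_softc *sc)
    {
        /*
         * Safe only because sc_sc is the first member: sc points at
         * the start of the enclosing qwx_pci_softc object.
         */
        struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
        psc->sc_pci_dummy = 1;
    }

    int
    main(void)
    {
        struct qwx_pci_softc psc;

        /* The cast round-trips: &psc.sc_sc aliases &psc. */
        assert(offsetof(struct qwx_pci_softc, sc_sc) == 0);
        example(&psc.sc_sc);
        return 0;
    }

The same pattern explains the paired declarations seen in most functions: entry points called with the generic softc cast down to psc, while helpers that take psc fetch sc back via &psc->sc_sc before touching shared state.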