1 /* $OpenBSD: if_qwx_pci.c,v 1.1 2023/12/28 17:36:29 stsp Exp $ */ 2 3 /* 4 * Copyright 2023 Stefan Sperling <stsp@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /* 20 * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. 21 * Copyright (c) 2018-2021 The Linux Foundation. 22 * All rights reserved. 23 * 24 * Redistribution and use in source and binary forms, with or without 25 * modification, are permitted (subject to the limitations in the disclaimer 26 * below) provided that the following conditions are met: 27 * 28 * * Redistributions of source code must retain the above copyright notice, 29 * this list of conditions and the following disclaimer. 30 * 31 * * Redistributions in binary form must reproduce the above copyright 32 * notice, this list of conditions and the following disclaimer in the 33 * documentation and/or other materials provided with the distribution. 34 * 35 * * Neither the name of [Owner Organization] nor the names of its 36 * contributors may be used to endorse or promote products derived from 37 * this software without specific prior written permission. 38 * 39 * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY 40 * THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND 41 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT 42 * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 43 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER 44 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 45 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 46 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 47 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 48 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 49 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 50 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
51 */ 52 53 #include <sys/param.h> 54 #include <sys/sockio.h> 55 #include <sys/mbuf.h> 56 #include <sys/kernel.h> 57 #include <sys/socket.h> 58 #include <sys/systm.h> 59 #include <sys/malloc.h> 60 #include <sys/conf.h> 61 #include <sys/device.h> 62 #include <sys/endian.h> 63 64 #include <machine/bus.h> 65 #include <machine/intr.h> 66 67 #include <net/if.h> 68 #include <net/if_media.h> 69 70 #include <netinet/in.h> 71 #include <netinet/if_ether.h> 72 73 #include <net80211/ieee80211_var.h> 74 #include <net80211/ieee80211_radiotap.h> 75 76 #include <dev/pci/pcireg.h> 77 #include <dev/pci/pcivar.h> 78 #include <dev/pci/pcidevs.h> 79 80 /* XXX linux porting goo */ 81 #ifdef __LP64__ 82 #define BITS_PER_LONG 64 83 #else 84 #define BITS_PER_LONG 32 85 #endif 86 #define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l))) 87 #define __bf_shf(x) (__builtin_ffsll(x) - 1) 88 #define FIELD_GET(_m, _v) ((typeof(_m))(((_v) & (_m)) >> __bf_shf(_m))) 89 #define BIT(x) (1UL << (x)) 90 #define test_bit(i, a) ((a) & (1 << (i))) 91 #define clear_bit(i, a) ((a)) &= ~(1 << (i)) 92 #define set_bit(i, a) ((a)) |= (1 << (i)) 93 94 /* #define QWX_DEBUG */ 95 96 #include <dev/ic/qwxreg.h> 97 #include <dev/ic/qwxvar.h> 98 99 /* Headers needed for RDDM dump */ 100 #include <sys/namei.h> 101 #include <sys/pledge.h> 102 #include <sys/vnode.h> 103 #include <sys/fcntl.h> 104 #include <sys/stat.h> 105 #include <sys/proc.h> 106 107 #define ATH11K_PCI_IRQ_CE0_OFFSET 3 108 #define ATH11K_PCI_IRQ_DP_OFFSET 14 109 110 #define ATH11K_PCI_CE_WAKE_IRQ 2 111 112 #define ATH11K_PCI_WINDOW_ENABLE_BIT 0x40000000 113 #define ATH11K_PCI_WINDOW_REG_ADDRESS 0x310c 114 #define ATH11K_PCI_WINDOW_VALUE_MASK GENMASK(24, 19) 115 #define ATH11K_PCI_WINDOW_START 0x80000 116 #define ATH11K_PCI_WINDOW_RANGE_MASK GENMASK(18, 0) 117 118 /* BAR0 + 4k is always accessible, and no need to force wakeup. 
*/ 119 #define ATH11K_PCI_ACCESS_ALWAYS_OFF 0xFE0 /* 4K - 32 = 0xFE0 */ 120 121 #define TCSR_SOC_HW_VERSION 0x0224 122 #define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8) 123 #define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0) 124 125 /* 126 * pci.h 127 */ 128 #define PCIE_SOC_GLOBAL_RESET 0x3008 129 #define PCIE_SOC_GLOBAL_RESET_V 1 130 131 #define WLAON_WARM_SW_ENTRY 0x1f80504 132 #define WLAON_SOC_RESET_CAUSE_REG 0x01f8060c 133 134 #define PCIE_Q6_COOKIE_ADDR 0x01f80500 135 #define PCIE_Q6_COOKIE_DATA 0xc0000000 136 137 /* register to wake the UMAC from power collapse */ 138 #define PCIE_SCRATCH_0_SOC_PCIE_REG 0x4040 139 140 /* register used for handshake mechanism to validate UMAC is awake */ 141 #define PCIE_SOC_WAKE_PCIE_LOCAL_REG 0x3004 142 143 #define PCIE_PCIE_PARF_LTSSM 0x1e081b0 144 #define PARM_LTSSM_VALUE 0x111 145 146 #define GCC_GCC_PCIE_HOT_RST 0x1e402bc 147 #define GCC_GCC_PCIE_HOT_RST_VAL 0x10 148 149 #define PCIE_PCIE_INT_ALL_CLEAR 0x1e08228 150 #define PCIE_SMLH_REQ_RST_LINK_DOWN 0x2 151 #define PCIE_INT_CLEAR_ALL 0xffffffff 152 153 #define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(sc) \ 154 (sc->hw_params.regs->pcie_qserdes_sysclk_en_sel) 155 #define PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL 0x10 156 #define PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK 0xffffffff 157 #define PCIE_PCS_OSC_DTCT_CONFIG1_REG(sc) \ 158 (sc->hw_params.regs->pcie_pcs_osc_dtct_config_base) 159 #define PCIE_PCS_OSC_DTCT_CONFIG1_VAL 0x02 160 #define PCIE_PCS_OSC_DTCT_CONFIG2_REG(sc) \ 161 (sc->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0x4) 162 #define PCIE_PCS_OSC_DTCT_CONFIG2_VAL 0x52 163 #define PCIE_PCS_OSC_DTCT_CONFIG4_REG(sc) \ 164 (sc->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0xc) 165 #define PCIE_PCS_OSC_DTCT_CONFIG4_VAL 0xff 166 #define PCIE_PCS_OSC_DTCT_CONFIG_MSK 0x000000ff 167 168 #define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c 169 #define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4 170 171 /* 172 * mhi.h 173 */ 174 #define PCIE_TXVECDB 0x360 175 #define PCIE_TXVECSTATUS 0x368 176 #define PCIE_RXVECDB 0x394 177 #define PCIE_RXVECSTATUS 0x39C 178 179 #define MHI_CHAN_CTX_CHSTATE_MASK GENMASK(7, 0) 180 #define MHI_CHAN_CTX_CHSTATE_DISABLED 0 181 #define MHI_CHAN_CTX_CHSTATE_ENABLED 1 182 #define MHI_CHAN_CTX_CHSTATE_RUNNING 2 183 #define MHI_CHAN_CTX_CHSTATE_SUSPENDED 3 184 #define MHI_CHAN_CTX_CHSTATE_STOP 4 185 #define MHI_CHAN_CTX_CHSTATE_ERROR 5 186 #define MHI_CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8) 187 #define MHI_CHAN_CTX_BRSTMODE_SHFT 8 188 #define MHI_CHAN_CTX_BRSTMODE_DISABLE 2 189 #define MHI_CHAN_CTX_BRSTMODE_ENABLE 3 190 #define MHI_CHAN_CTX_POLLCFG_MASK GENMASK(15, 10) 191 #define MHI_CHAN_CTX_RESERVED_MASK GENMASK(31, 16) 192 193 #define QWX_MHI_CONFIG_QCA6390_MAX_CHANNELS 128 194 #define QWX_MHI_CONFIG_QCA6390_TIMEOUT_MS 2000 195 #define QWX_MHI_CONFIG_QCA9074_MAX_CHANNELS 30 196 197 #define MHI_CHAN_TYPE_INVALID 0 198 #define MHI_CHAN_TYPE_OUTBOUND 1 /* to device */ 199 #define MHI_CHAN_TYPE_INBOUND 2 /* from device */ 200 #define MHI_CHAN_TYPE_INBOUND_COALESCED 3 201 202 #define MHI_EV_CTX_RESERVED_MASK GENMASK(7, 0) 203 #define MHI_EV_CTX_INTMODC_MASK GENMASK(15, 8) 204 #define MHI_EV_CTX_INTMODT_MASK GENMASK(31, 16) 205 #define MHI_EV_CTX_INTMODT_SHFT 16 206 207 #define MHI_ER_TYPE_INVALID 0 208 #define MHI_ER_TYPE_VALID 1 209 210 #define MHI_ER_DATA 0 211 #define MHI_ER_CTRL 1 212 213 #define MHI_CH_STATE_DISABLED 0 214 #define MHI_CH_STATE_ENABLED 1 215 #define MHI_CH_STATE_RUNNING 2 216 #define MHI_CH_STATE_SUSPENDED 3 217 #define MHI_CH_STATE_STOP 4 218 #define MHI_CH_STATE_ERROR 5 219 
220 #define QWX_NUM_EVENT_CTX 2 221 222 /* Event context. Shared with device. */ 223 struct qwx_mhi_event_ctxt { 224 uint32_t intmod; 225 uint32_t ertype; 226 uint32_t msivec; 227 228 uint64_t rbase; 229 uint64_t rlen; 230 uint64_t rp; 231 uint64_t wp; 232 } __packed; 233 234 /* Channel context. Shared with device. */ 235 struct qwx_mhi_chan_ctxt { 236 uint32_t chcfg; 237 uint32_t chtype; 238 uint32_t erindex; 239 240 uint64_t rbase; 241 uint64_t rlen; 242 uint64_t rp; 243 uint64_t wp; 244 } __packed; 245 246 /* Command context. Shared with device. */ 247 struct qwx_mhi_cmd_ctxt { 248 uint32_t reserved0; 249 uint32_t reserved1; 250 uint32_t reserved2; 251 252 uint64_t rbase; 253 uint64_t rlen; 254 uint64_t rp; 255 uint64_t wp; 256 } __packed; 257 258 struct qwx_mhi_ring_element { 259 uint64_t ptr; 260 uint32_t dword[2]; 261 }; 262 263 struct qwx_xfer_data { 264 bus_dmamap_t map; 265 struct mbuf *m; 266 }; 267 268 #define QWX_PCI_XFER_MAX_DATA_SIZE 0xffff 269 #define QWX_PCI_XFER_RING_MAX_ELEMENTS 64 270 271 struct qwx_pci_xfer_ring { 272 struct qwx_dmamem *dmamem; 273 bus_size_t size; 274 uint32_t mhi_chan_id; 275 uint32_t mhi_chan_state; 276 uint32_t mhi_chan_direction; 277 uint32_t mhi_chan_event_ring_index; 278 uint32_t db_addr; 279 uint32_t cmd_status; 280 int num_elements; 281 int queued; 282 struct qwx_xfer_data data[QWX_PCI_XFER_RING_MAX_ELEMENTS]; 283 uint64_t rp; 284 uint64_t wp; 285 struct qwx_mhi_chan_ctxt *chan_ctxt; 286 }; 287 288 289 #define QWX_PCI_EVENT_RING_MAX_ELEMENTS 256 290 291 struct qwx_pci_event_ring { 292 struct qwx_dmamem *dmamem; 293 bus_size_t size; 294 uint32_t mhi_er_type; 295 uint32_t mhi_er_irq; 296 uint32_t mhi_er_irq_moderation_ms; 297 uint32_t db_addr; 298 int num_elements; 299 uint64_t rp; 300 uint64_t wp; 301 struct qwx_mhi_event_ctxt *event_ctxt; 302 }; 303 304 struct qwx_cmd_data { 305 bus_dmamap_t map; 306 struct mbuf *m; 307 }; 308 309 #define QWX_PCI_CMD_RING_MAX_ELEMENTS 128 310 311 struct qwx_pci_cmd_ring { 312 struct qwx_dmamem *dmamem; 313 bus_size_t size; 314 uint64_t rp; 315 uint64_t wp; 316 int num_elements; 317 int queued; 318 }; 319 320 struct qwx_pci_ops; 321 struct qwx_msi_config; 322 323 struct qwx_mhi_newstate { 324 struct { 325 int mhi_state; 326 int ee; 327 } queue[4]; 328 int cur; 329 int tail; 330 int queued; 331 }; 332 333 struct qwx_pci_softc { 334 struct qwx_softc sc_sc; 335 pci_chipset_tag_t sc_pc; 336 pcitag_t sc_tag; 337 int sc_cap_off; 338 int sc_msi_off; 339 pcireg_t sc_msi_cap; 340 void *sc_ih; 341 bus_space_tag_t sc_st; 342 bus_space_handle_t sc_sh; 343 bus_addr_t sc_map; 344 bus_size_t sc_mapsize; 345 346 pcireg_t sc_lcsr; 347 uint32_t sc_flags; 348 #define ATH11K_PCI_ASPM_RESTORE 1 349 350 uint32_t register_window; 351 const struct qwx_pci_ops *sc_pci_ops; 352 353 uint32_t bhi_off; 354 uint32_t bhi_ee; 355 uint32_t bhie_off; 356 uint32_t mhi_state; 357 uint32_t max_chan; 358 359 uint64_t wake_db; 360 361 struct qwx_mhi_newstate mhi_newstate; 362 struct task mhi_newstate_task; 363 364 /* 365 * DMA memory for AMMS.bin firmware image. 366 * This memory must remain available to the device until 367 * the device is powered down. 
368 */ 369 struct qwx_dmamem *amss_data; 370 struct qwx_dmamem *amss_vec; 371 372 struct qwx_dmamem *rddm_vec; 373 struct qwx_dmamem *rddm_data; 374 int rddm_triggered; 375 struct task rddm_task; 376 #define QWX_RDDM_DUMP_SIZE 0x420000 377 378 struct qwx_dmamem *chan_ctxt; 379 struct qwx_dmamem *event_ctxt; 380 struct qwx_dmamem *cmd_ctxt; 381 382 383 struct qwx_pci_xfer_ring xfer_rings[4]; 384 #define QWX_PCI_XFER_RING_LOOPBACK_OUTBOUND 0 385 #define QWX_PCI_XFER_RING_LOOPBACK_INBOUND 1 386 #define QWX_PCI_XFER_RING_IPCR_OUTBOUND 2 387 #define QWX_PCI_XFER_RING_IPCR_INBOUND 3 388 struct qwx_pci_event_ring event_rings[QWX_NUM_EVENT_CTX]; 389 struct qwx_pci_cmd_ring cmd_ring; 390 }; 391 392 int qwx_pci_match(struct device *, void *, void *); 393 void qwx_pci_attach(struct device *, struct device *, void *); 394 int qwx_pci_detach(struct device *, int); 395 void qwx_pci_attach_hook(struct device *); 396 int qwx_pci_activate(struct device *, int); 397 void qwx_pci_free_xfer_rings(struct qwx_pci_softc *); 398 int qwx_pci_alloc_xfer_ring(struct qwx_softc *, struct qwx_pci_xfer_ring *, 399 uint32_t, uint32_t, uint32_t, size_t); 400 int qwx_pci_alloc_xfer_rings_qca6390(struct qwx_pci_softc *); 401 int qwx_pci_alloc_xfer_rings_qcn9074(struct qwx_pci_softc *); 402 void qwx_pci_free_event_rings(struct qwx_pci_softc *); 403 int qwx_pci_alloc_event_ring(struct qwx_softc *, 404 struct qwx_pci_event_ring *, uint32_t, uint32_t, uint32_t, size_t); 405 int qwx_pci_alloc_event_rings(struct qwx_pci_softc *); 406 void qwx_pci_free_cmd_ring(struct qwx_pci_softc *); 407 int qwx_pci_init_cmd_ring(struct qwx_softc *, struct qwx_pci_cmd_ring *); 408 uint32_t qwx_pci_read(struct qwx_softc *, uint32_t); 409 void qwx_pci_write(struct qwx_softc *, uint32_t, uint32_t); 410 411 void qwx_pci_read_hw_version(struct qwx_softc *, uint32_t *, uint32_t *); 412 uint32_t qwx_pcic_read32(struct qwx_softc *, uint32_t); 413 void qwx_pcic_write32(struct qwx_softc *, uint32_t, uint32_t); 414 415 void qwx_pcic_ext_irq_enable(struct qwx_softc *); 416 void qwx_pcic_ext_irq_disable(struct qwx_softc *); 417 418 int qwx_pci_start(struct qwx_softc *); 419 void qwx_pci_stop(struct qwx_softc *); 420 void qwx_pci_aspm_disable(struct qwx_softc *); 421 void qwx_pci_aspm_restore(struct qwx_softc *); 422 int qwx_pci_power_up(struct qwx_softc *); 423 void qwx_pci_power_down(struct qwx_softc *); 424 425 int qwx_pci_bus_wake_up(struct qwx_softc *); 426 void qwx_pci_bus_release(struct qwx_softc *); 427 void qwx_pci_window_write32(struct qwx_softc *, uint32_t, uint32_t); 428 uint32_t qwx_pci_window_read32(struct qwx_softc *, uint32_t); 429 430 int qwx_mhi_register(struct qwx_softc *); 431 void qwx_mhi_unregister(struct qwx_softc *); 432 void qwx_mhi_ring_doorbell(struct qwx_softc *sc, uint64_t, uint64_t); 433 void qwx_mhi_device_wake(struct qwx_softc *); 434 void qwx_mhi_device_zzz(struct qwx_softc *); 435 int qwx_mhi_wake_db_clear_valid(struct qwx_softc *); 436 void qwx_mhi_init_xfer_rings(struct qwx_pci_softc *); 437 void qwx_mhi_init_event_rings(struct qwx_pci_softc *); 438 void qwx_mhi_init_cmd_ring(struct qwx_pci_softc *); 439 void qwx_mhi_init_dev_ctxt(struct qwx_pci_softc *); 440 int qwx_mhi_send_cmd(struct qwx_pci_softc *psc, uint32_t, uint32_t); 441 void * qwx_pci_xfer_ring_get_elem(struct qwx_pci_xfer_ring *, uint64_t); 442 struct qwx_xfer_data *qwx_pci_xfer_ring_get_data(struct qwx_pci_xfer_ring *, 443 uint64_t); 444 int qwx_mhi_submit_xfer(struct qwx_softc *sc, struct mbuf *m); 445 int qwx_mhi_start_channel(struct qwx_pci_softc *, 446 
struct qwx_pci_xfer_ring *); 447 int qwx_mhi_start_channels(struct qwx_pci_softc *); 448 int qwx_mhi_start(struct qwx_pci_softc *); 449 void qwx_mhi_stop(struct qwx_softc *); 450 int qwx_mhi_reset_device(struct qwx_softc *, int); 451 void qwx_mhi_clear_vector(struct qwx_softc *); 452 int qwx_mhi_fw_load_handler(struct qwx_pci_softc *); 453 int qwx_mhi_await_device_reset(struct qwx_softc *); 454 int qwx_mhi_await_device_ready(struct qwx_softc *); 455 void qwx_mhi_ready_state_transition(struct qwx_pci_softc *); 456 void qwx_mhi_ee_amss_state_transition(struct qwx_pci_softc *); 457 void qwx_mhi_mission_mode_state_transition(struct qwx_pci_softc *); 458 void qwx_mhi_set_state(struct qwx_softc *, uint32_t); 459 void qwx_mhi_init_mmio(struct qwx_pci_softc *); 460 int qwx_mhi_fw_load_bhi(struct qwx_pci_softc *, uint8_t *, size_t); 461 int qwx_mhi_fw_load_bhie(struct qwx_pci_softc *, uint8_t *, size_t); 462 void qwx_rddm_prepare(struct qwx_pci_softc *); 463 void qwx_rddm_task(void *); 464 void * qwx_pci_event_ring_get_elem(struct qwx_pci_event_ring *, uint64_t); 465 void qwx_mhi_queue_state_change(struct qwx_pci_softc *, int, int); 466 void qwx_mhi_state_change(void *); 467 void qwx_pci_intr_ctrl_event_mhi(struct qwx_pci_softc *, uint32_t); 468 void qwx_pci_intr_ctrl_event_ee(struct qwx_pci_softc *, uint32_t); 469 void qwx_pci_intr_ctrl_event_cmd_complete(struct qwx_pci_softc *, 470 uint64_t, uint32_t); 471 int qwx_pci_intr_ctrl_event(struct qwx_pci_softc *, 472 struct qwx_pci_event_ring *); 473 void qwx_pci_intr_data_event_tx(struct qwx_pci_softc *, 474 struct qwx_mhi_ring_element *); 475 int qwx_pci_intr_data_event(struct qwx_pci_softc *, 476 struct qwx_pci_event_ring *); 477 int qwx_pci_intr(void *); 478 479 struct qwx_pci_ops { 480 int (*wakeup)(struct qwx_softc *); 481 void (*release)(struct qwx_softc *); 482 int (*get_msi_irq)(struct qwx_softc *, unsigned int); 483 void (*window_write32)(struct qwx_softc *, uint32_t, uint32_t); 484 uint32_t (*window_read32)(struct qwx_softc *, uint32_t); 485 int (*alloc_xfer_rings)(struct qwx_pci_softc *); 486 }; 487 488 489 static const struct qwx_pci_ops qwx_pci_ops_qca6390 = { 490 .wakeup = qwx_pci_bus_wake_up, 491 .release = qwx_pci_bus_release, 492 #if notyet 493 .get_msi_irq = qwx_pci_get_msi_irq, 494 #endif 495 .window_write32 = qwx_pci_window_write32, 496 .window_read32 = qwx_pci_window_read32, 497 .alloc_xfer_rings = qwx_pci_alloc_xfer_rings_qca6390, 498 }; 499 500 static const struct qwx_pci_ops qwx_pci_ops_qcn9074 = { 501 .wakeup = NULL, 502 .release = NULL, 503 #if notyet 504 .get_msi_irq = qwx_pci_get_msi_irq, 505 #endif 506 .window_write32 = qwx_pci_window_write32, 507 .window_read32 = qwx_pci_window_read32, 508 .alloc_xfer_rings = qwx_pci_alloc_xfer_rings_qcn9074, 509 }; 510 511 const struct cfattach qwx_pci_ca = { 512 sizeof(struct qwx_pci_softc), 513 qwx_pci_match, 514 qwx_pci_attach, 515 qwx_pci_detach, 516 qwx_pci_activate 517 }; 518 519 /* XXX pcidev */ 520 #define PCI_PRODUCT_QUALCOMM_QCA6390 0x1101 521 #define PCI_PRODUCT_QUALCOMM_QCN9074 0x1104 522 523 static const struct pci_matchid qwx_pci_devices[] = { 524 #if notyet 525 { PCI_VENDOR_QUALCOMM, PCI_PRODUCT_QUALCOMM_QCA6390 }, 526 { PCI_VENDOR_QUALCOMM, PCI_PRODUCT_QUALCOMM_QCN9074 }, 527 #endif 528 { PCI_VENDOR_QUALCOMM, PCI_PRODUCT_QUALCOMM_QCNFA765 } 529 }; 530 531 int 532 qwx_pci_match(struct device *parent, void *match, void *aux) 533 { 534 return pci_matchbyid(aux, qwx_pci_devices, nitems(qwx_pci_devices)); 535 } 536 537 void 538 qwx_pci_init_qmi_ce_config(struct qwx_softc *sc) 
539 { 540 struct qwx_qmi_ce_cfg *cfg = &sc->qmi_ce_cfg; 541 542 qwx_ce_get_shadow_config(sc, &cfg->shadow_reg_v2, 543 &cfg->shadow_reg_v2_len); 544 } 545 546 const struct qwx_msi_config qwx_msi_config_one_msi = { 547 .total_vectors = 1, 548 .total_users = 4, 549 .users = (struct qwx_msi_user[]) { 550 { .name = "MHI", .num_vectors = 1, .base_vector = 0 }, 551 { .name = "CE", .num_vectors = 1, .base_vector = 0 }, 552 { .name = "WAKE", .num_vectors = 1, .base_vector = 0 }, 553 { .name = "DP", .num_vectors = 1, .base_vector = 0 }, 554 }, 555 }; 556 557 int 558 qwx_pci_alloc_msi(struct qwx_softc *sc) 559 { 560 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; 561 uint64_t addr; 562 pcireg_t data; 563 564 /* 565 * OpenBSD only supports one MSI vector at present. 566 * Mulitple vectors are only supported with MSI-X. 567 */ 568 569 if (psc->sc_msi_cap & PCI_MSI_MC_C64) { 570 uint64_t addr_hi; 571 pcireg_t addr_lo; 572 573 addr_lo = pci_conf_read(psc->sc_pc, psc->sc_tag, 574 psc->sc_msi_off + PCI_MSI_MA); 575 addr_hi = pci_conf_read(psc->sc_pc, psc->sc_tag, 576 psc->sc_msi_off + PCI_MSI_MAU32); 577 addr = addr_hi << 32 | addr_lo; 578 data = pci_conf_read(psc->sc_pc, psc->sc_tag, 579 psc->sc_msi_off + PCI_MSI_MD64); 580 } else { 581 addr = pci_conf_read(psc->sc_pc, psc->sc_tag, 582 psc->sc_msi_off + PCI_MSI_MA); 583 data = pci_conf_read(psc->sc_pc, psc->sc_tag, 584 psc->sc_msi_off + PCI_MSI_MD32); 585 } 586 587 sc->msi_addr_lo = addr & 0xffffffff; 588 sc->msi_addr_hi = ((uint64_t)addr) >> 32; 589 sc->msi_data_start = data; 590 591 DPRINTF("%s: MSI addr: 0x%llx MSI data: 0x%x\n", sc->sc_dev.dv_xname, 592 addr, data); 593 594 sc->msi_cfg = &qwx_msi_config_one_msi; 595 return 0; 596 } 597 598 int 599 qwx_pcic_map_service_to_pipe(struct qwx_softc *sc, uint16_t service_id, 600 uint8_t *ul_pipe, uint8_t *dl_pipe) 601 { 602 const struct service_to_pipe *entry; 603 int ul_set = 0, dl_set = 0; 604 int i; 605 606 for (i = 0; i < sc->hw_params.svc_to_ce_map_len; i++) { 607 entry = &sc->hw_params.svc_to_ce_map[i]; 608 609 if (le32toh(entry->service_id) != service_id) 610 continue; 611 612 switch (le32toh(entry->pipedir)) { 613 case PIPEDIR_NONE: 614 break; 615 case PIPEDIR_IN: 616 *dl_pipe = le32toh(entry->pipenum); 617 dl_set = 1; 618 break; 619 case PIPEDIR_OUT: 620 *ul_pipe = le32toh(entry->pipenum); 621 ul_set = 1; 622 break; 623 case PIPEDIR_INOUT: 624 *dl_pipe = le32toh(entry->pipenum); 625 *ul_pipe = le32toh(entry->pipenum); 626 dl_set = 1; 627 ul_set = 1; 628 break; 629 } 630 } 631 632 if (!ul_set || !dl_set) { 633 DPRINTF("%s: found no uplink and no downlink\n", __func__); 634 return ENOENT; 635 } 636 637 return 0; 638 } 639 640 int 641 qwx_pcic_get_user_msi_vector(struct qwx_softc *sc, char *user_name, 642 int *num_vectors, uint32_t *user_base_data, uint32_t *base_vector) 643 { 644 const struct qwx_msi_config *msi_config = sc->msi_cfg; 645 int idx; 646 647 for (idx = 0; idx < msi_config->total_users; idx++) { 648 if (strcmp(user_name, msi_config->users[idx].name) == 0) { 649 *num_vectors = msi_config->users[idx].num_vectors; 650 *base_vector = msi_config->users[idx].base_vector; 651 *user_base_data = *base_vector + sc->msi_data_start; 652 653 DPRINTF("%s: MSI assignment %s num_vectors %d " 654 "user_base_data %u base_vector %u\n", __func__, 655 user_name, *num_vectors, *user_base_data, 656 *base_vector); 657 return 0; 658 } 659 } 660 661 DPRINTF("%s: Failed to find MSI assignment for %s\n", 662 sc->sc_dev.dv_xname, user_name); 663 return EINVAL; 664 } 665 666 void 667 qwx_pci_attach(struct 
device *parent, struct device *self, void *aux) 668 { 669 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)self; 670 struct qwx_softc *sc = &psc->sc_sc; 671 struct ieee80211com *ic = &sc->sc_ic; 672 struct ifnet *ifp = &ic->ic_if; 673 uint32_t soc_hw_version_major, soc_hw_version_minor; 674 const struct qwx_pci_ops *pci_ops; 675 struct pci_attach_args *pa = aux; 676 pci_intr_handle_t ih; 677 pcireg_t memtype, reg; 678 const char *intrstr; 679 int error; 680 pcireg_t sreg; 681 682 sc->sc_dmat = pa->pa_dmat; 683 psc->sc_pc = pa->pa_pc; 684 psc->sc_tag = pa->pa_tag; 685 686 rw_init(&sc->ioctl_rwl, "qwxioctl"); 687 688 sreg = pci_conf_read(psc->sc_pc, psc->sc_tag, PCI_SUBSYS_ID_REG); 689 sc->id.bdf_search = ATH11K_BDF_SEARCH_DEFAULT; 690 sc->id.vendor = PCI_VENDOR(pa->pa_id); 691 sc->id.device = PCI_PRODUCT(pa->pa_id); 692 sc->id.subsystem_vendor = PCI_VENDOR(sreg); 693 sc->id.subsystem_device = PCI_PRODUCT(sreg); 694 695 strlcpy(sc->sc_bus_str, "pci", sizeof(sc->sc_bus_str)); 696 697 sc->ops.read32 = qwx_pcic_read32; 698 sc->ops.write32 = qwx_pcic_write32; 699 sc->ops.start = qwx_pci_start; 700 sc->ops.stop = qwx_pci_stop; 701 sc->ops.power_up = qwx_pci_power_up; 702 sc->ops.power_down = qwx_pci_power_down; 703 sc->ops.submit_xfer = qwx_mhi_submit_xfer; 704 sc->ops.irq_enable = qwx_pcic_ext_irq_enable; 705 sc->ops.irq_disable = qwx_pcic_ext_irq_disable; 706 sc->ops.map_service_to_pipe = qwx_pcic_map_service_to_pipe; 707 sc->ops.get_user_msi_vector = qwx_pcic_get_user_msi_vector; 708 709 if (pci_get_capability(psc->sc_pc, psc->sc_tag, PCI_CAP_PCIEXPRESS, 710 &psc->sc_cap_off, NULL) == 0) { 711 printf(": can't find PCIe capability structure\n"); 712 return; 713 } 714 715 if (pci_get_capability(psc->sc_pc, psc->sc_tag, PCI_CAP_MSI, 716 &psc->sc_msi_off, &psc->sc_msi_cap) == 0) { 717 printf(": can't find MSI capability structure\n"); 718 return; 719 } 720 721 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 722 reg |= PCI_COMMAND_MASTER_ENABLE; 723 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, reg); 724 725 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START); 726 if (pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0, 727 &psc->sc_st, &psc->sc_sh, &psc->sc_map, &psc->sc_mapsize, 0)) { 728 printf(": can't map mem space\n"); 729 return; 730 } 731 732 sc->mem = psc->sc_map; 733 734 if (pci_intr_map_msi(pa, &ih)) { 735 printf(": can't map interrupt\n"); 736 return; 737 } 738 739 intrstr = pci_intr_string(psc->sc_pc, ih); 740 psc->sc_ih = pci_intr_establish(psc->sc_pc, ih, IPL_NET, 741 qwx_pci_intr, psc, sc->sc_dev.dv_xname); 742 if (psc->sc_ih == NULL) { 743 printf(": can't establish interrupt"); 744 if (intrstr != NULL) 745 printf(" at %s", intrstr); 746 printf("\n"); 747 return; 748 } 749 printf(": %s\n", intrstr); 750 751 pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0); 752 753 switch (PCI_PRODUCT(pa->pa_id)) { 754 case PCI_PRODUCT_QUALCOMM_QCA6390: 755 qwx_pci_read_hw_version(sc, &soc_hw_version_major, 756 &soc_hw_version_minor); 757 switch (soc_hw_version_major) { 758 case 2: 759 sc->sc_hw_rev = ATH11K_HW_QCA6390_HW20; 760 break; 761 default: 762 printf(": unsupported QCA6390 SOC version: %d %d\n", 763 soc_hw_version_major, soc_hw_version_minor); 764 return; 765 } 766 767 pci_ops = &qwx_pci_ops_qca6390; 768 psc->max_chan = QWX_MHI_CONFIG_QCA6390_MAX_CHANNELS; 769 break; 770 case PCI_PRODUCT_QUALCOMM_QCN9074: 771 pci_ops = &qwx_pci_ops_qcn9074; 772 sc->sc_hw_rev = ATH11K_HW_QCN9074_HW10; 773 psc->max_chan = 
QWX_MHI_CONFIG_QCA9074_MAX_CHANNELS; 774 break; 775 case PCI_PRODUCT_QUALCOMM_QCNFA765: 776 sc->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD; 777 qwx_pci_read_hw_version(sc, &soc_hw_version_major, 778 &soc_hw_version_minor); 779 switch (soc_hw_version_major) { 780 case 2: 781 switch (soc_hw_version_minor) { 782 case 0x00: 783 case 0x01: 784 sc->sc_hw_rev = ATH11K_HW_WCN6855_HW20; 785 break; 786 case 0x10: 787 case 0x11: 788 sc->sc_hw_rev = ATH11K_HW_WCN6855_HW21; 789 break; 790 default: 791 goto unsupported_wcn6855_soc; 792 } 793 break; 794 default: 795 unsupported_wcn6855_soc: 796 printf(": unsupported WCN6855 SOC version: %d %d\n", 797 soc_hw_version_major, soc_hw_version_minor); 798 return; 799 } 800 801 pci_ops = &qwx_pci_ops_qca6390; 802 psc->max_chan = QWX_MHI_CONFIG_QCA6390_MAX_CHANNELS; 803 break; 804 default: 805 printf(": unsupported chip\n"); 806 return; 807 } 808 809 /* register PCI ops */ 810 psc->sc_pci_ops = pci_ops; 811 812 /* init MSI config */ 813 clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags); 814 815 #if notyet 816 ret = ath11k_pcic_init_msi_config(ab); 817 if (ret) { 818 ath11k_err(ab, "failed to init msi config: %d\n", ret); 819 goto err_pci_free_region; 820 } 821 #endif 822 error = qwx_pci_alloc_msi(sc); 823 if (error) { 824 printf("%s: failed to enable msi: %d\n", sc->sc_dev.dv_xname, 825 error); 826 goto err_pci_free_region; 827 } 828 829 error = qwx_init_hw_params(sc); 830 if (error) 831 goto err_pci_disable_msi; 832 833 psc->chan_ctxt = qwx_dmamem_alloc(sc->sc_dmat, 834 sizeof(struct qwx_mhi_chan_ctxt) * psc->max_chan, 0); 835 if (psc->chan_ctxt == NULL) { 836 printf("%s: could not allocate channel context array\n", 837 sc->sc_dev.dv_xname); 838 goto err_pci_disable_msi; 839 } 840 841 if (psc->sc_pci_ops->alloc_xfer_rings(psc)) { 842 printf("%s: could not allocate transfer rings\n", 843 sc->sc_dev.dv_xname); 844 goto err_pci_free_chan_ctxt; 845 } 846 847 psc->event_ctxt = qwx_dmamem_alloc(sc->sc_dmat, 848 sizeof(struct qwx_mhi_event_ctxt) * QWX_NUM_EVENT_CTX, 0); 849 if (psc->event_ctxt == NULL) { 850 printf("%s: could not allocate event context array\n", 851 sc->sc_dev.dv_xname); 852 goto err_pci_free_xfer_rings; 853 } 854 855 if (qwx_pci_alloc_event_rings(psc)) { 856 printf("%s: could not allocate event rings\n", 857 sc->sc_dev.dv_xname); 858 goto err_pci_free_event_ctxt; 859 } 860 861 psc->cmd_ctxt = qwx_dmamem_alloc(sc->sc_dmat, 862 sizeof(struct qwx_mhi_cmd_ctxt), 0); 863 if (psc->cmd_ctxt == NULL) { 864 printf("%s: could not allocate command context array\n", 865 sc->sc_dev.dv_xname); 866 goto err_pci_free_event_rings; 867 } 868 869 if (qwx_pci_init_cmd_ring(sc, &psc->cmd_ring)) { 870 printf("%s: could not allocate command ring\n", 871 sc->sc_dev.dv_xname); 872 goto err_pci_free_cmd_ctxt; 873 } 874 875 error = qwx_mhi_register(sc); 876 if (error) { 877 printf(": failed to register mhi: %d\n", error); 878 goto err_pci_free_cmd_ring; 879 } 880 881 error = qwx_hal_srng_init(sc); 882 if (error) 883 goto err_mhi_unregister; 884 885 error = qwx_ce_alloc_pipes(sc); 886 if (error) { 887 printf(": failed to allocate ce pipes: %d\n", error); 888 goto err_hal_srng_deinit; 889 } 890 891 sc->sc_nswq = taskq_create("qwxns", 1, IPL_NET, 0); 892 if (sc->sc_nswq == NULL) 893 goto err_hal_srng_deinit; 894 895 qwx_pci_init_qmi_ce_config(sc); 896 897 #if notyet 898 ret = ath11k_pcic_config_irq(ab); 899 if (ret) { 900 ath11k_err(ab, "failed to config irq: %d\n", ret); 901 goto err_ce_free; 902 } 903 904 ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0)); 
905 if (ret) { 906 ath11k_err(ab, "failed to set irq affinity %d\n", ret); 907 goto err_free_irq; 908 } 909 910 /* kernel may allocate a dummy vector before request_irq and 911 * then allocate a real vector when request_irq is called. 912 * So get msi_data here again to avoid spurious interrupt 913 * as msi_data will configured to srngs. 914 */ 915 ret = ath11k_pci_config_msi_data(ab_pci); 916 if (ret) { 917 ath11k_err(ab, "failed to config msi_data: %d\n", ret); 918 goto err_irq_affinity_cleanup; 919 } 920 #endif 921 task_set(&psc->mhi_newstate_task, qwx_mhi_state_change, psc); 922 task_set(&psc->rddm_task, qwx_rddm_task, psc); 923 924 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 925 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 926 ic->ic_state = IEEE80211_S_INIT; 927 928 /* Set device capabilities. */ 929 ic->ic_caps = 930 #if 0 931 IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */ 932 #endif 933 IEEE80211_C_ADDBA_OFFLOAD | /* device sends ADDBA/DELBA frames */ 934 IEEE80211_C_WEP | /* WEP */ 935 IEEE80211_C_RSN | /* WPA/RSN */ 936 IEEE80211_C_SCANALL | /* device scans all channels at once */ 937 IEEE80211_C_SCANALLBAND | /* device scans all bands at once */ 938 #if 0 939 IEEE80211_C_MONITOR | /* monitor mode supported */ 940 #endif 941 IEEE80211_C_SHSLOT | /* short slot time supported */ 942 IEEE80211_C_SHPREAMBLE; /* short preamble supported */ 943 944 ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a; 945 ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b; 946 ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g; 947 948 /* IBSS channel undefined for now. */ 949 ic->ic_ibss_chan = &ic->ic_channels[1]; 950 951 ifp->if_softc = sc; 952 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 953 ifp->if_ioctl = qwx_ioctl; 954 ifp->if_start = qwx_start; 955 ifp->if_watchdog = qwx_watchdog; 956 memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 957 if_attach(ifp); 958 ieee80211_ifattach(ifp); 959 ieee80211_media_init(ifp, qwx_media_change, ieee80211_media_status); 960 961 ic->ic_node_alloc = qwx_node_alloc; 962 963 /* Override 802.11 state transition machine. */ 964 sc->sc_newstate = ic->ic_newstate; 965 ic->ic_newstate = qwx_newstate; 966 #if 0 967 ic->ic_updatechan = qwx_updatechan; 968 ic->ic_updateprot = qwx_updateprot; 969 ic->ic_updateslot = qwx_updateslot; 970 ic->ic_updateedca = qwx_updateedca; 971 ic->ic_updatedtim = qwx_updatedtim; 972 #endif 973 /* 974 * We cannot read the MAC address without loading the 975 * firmware from disk. Postpone until mountroot is done. 
976 */ 977 config_mountroot(self, qwx_pci_attach_hook); 978 return; 979 980 err_hal_srng_deinit: 981 err_mhi_unregister: 982 err_pci_free_cmd_ring: 983 qwx_pci_free_cmd_ring(psc); 984 err_pci_free_cmd_ctxt: 985 qwx_dmamem_free(sc->sc_dmat, psc->cmd_ctxt); 986 psc->cmd_ctxt = NULL; 987 err_pci_free_event_rings: 988 qwx_pci_free_event_rings(psc); 989 err_pci_free_event_ctxt: 990 qwx_dmamem_free(sc->sc_dmat, psc->event_ctxt); 991 psc->event_ctxt = NULL; 992 err_pci_free_xfer_rings: 993 qwx_pci_free_xfer_rings(psc); 994 err_pci_free_chan_ctxt: 995 qwx_dmamem_free(sc->sc_dmat, psc->chan_ctxt); 996 psc->chan_ctxt = NULL; 997 err_pci_disable_msi: 998 err_pci_free_region: 999 pci_intr_disestablish(psc->sc_pc, psc->sc_ih); 1000 return; 1001 } 1002 1003 int 1004 qwx_pci_detach(struct device *self, int flags) 1005 { 1006 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)self; 1007 struct qwx_softc *sc = &psc->sc_sc; 1008 1009 if (psc->sc_ih) { 1010 pci_intr_disestablish(psc->sc_pc, psc->sc_ih); 1011 psc->sc_ih = NULL; 1012 } 1013 1014 qwx_detach(sc); 1015 1016 qwx_pci_free_event_rings(psc); 1017 qwx_pci_free_xfer_rings(psc); 1018 qwx_pci_free_cmd_ring(psc); 1019 1020 if (psc->event_ctxt) { 1021 qwx_dmamem_free(sc->sc_dmat, psc->event_ctxt); 1022 psc->event_ctxt = NULL; 1023 } 1024 if (psc->chan_ctxt) { 1025 qwx_dmamem_free(sc->sc_dmat, psc->chan_ctxt); 1026 psc->chan_ctxt = NULL; 1027 } 1028 if (psc->cmd_ctxt) { 1029 qwx_dmamem_free(sc->sc_dmat, psc->cmd_ctxt); 1030 psc->cmd_ctxt = NULL; 1031 } 1032 1033 if (psc->amss_data) { 1034 qwx_dmamem_free(sc->sc_dmat, psc->amss_data); 1035 psc->amss_data = NULL; 1036 } 1037 if (psc->amss_vec) { 1038 qwx_dmamem_free(sc->sc_dmat, psc->amss_vec); 1039 psc->amss_vec = NULL; 1040 } 1041 1042 return 0; 1043 } 1044 1045 void 1046 qwx_pci_attach_hook(struct device *self) 1047 { 1048 struct qwx_softc *sc = (void *)self; 1049 int s = splnet(); 1050 1051 qwx_attach(sc); 1052 1053 splx(s); 1054 } 1055 1056 int 1057 qwx_pci_activate(struct device *self, int act) 1058 { 1059 switch (act) { 1060 case DVACT_SUSPEND: 1061 break; 1062 case DVACT_WAKEUP: 1063 break; 1064 } 1065 1066 return 0; 1067 } 1068 1069 void 1070 qwx_pci_free_xfer_rings(struct qwx_pci_softc *psc) 1071 { 1072 struct qwx_softc *sc = &psc->sc_sc; 1073 int i; 1074 1075 for (i = 0; i < nitems(psc->xfer_rings); i++) { 1076 struct qwx_pci_xfer_ring *ring = &psc->xfer_rings[i]; 1077 if (ring->dmamem) { 1078 qwx_dmamem_free(sc->sc_dmat, ring->dmamem); 1079 ring->dmamem = NULL; 1080 } 1081 memset(ring, 0, sizeof(*ring)); 1082 } 1083 } 1084 1085 int 1086 qwx_pci_alloc_xfer_ring(struct qwx_softc *sc, struct qwx_pci_xfer_ring *ring, 1087 uint32_t id, uint32_t direction, uint32_t event_ring_index, 1088 size_t num_elements) 1089 { 1090 bus_size_t size; 1091 int i, err; 1092 1093 memset(ring, 0, sizeof(*ring)); 1094 1095 size = sizeof(struct qwx_mhi_ring_element) * num_elements; 1096 /* Hardware requires that rings are aligned to ring size. 
*/ 1097 ring->dmamem = qwx_dmamem_alloc(sc->sc_dmat, size, size); 1098 if (ring->dmamem == NULL) 1099 return ENOMEM; 1100 1101 ring->size = size; 1102 ring->mhi_chan_id = id; 1103 ring->mhi_chan_state = MHI_CH_STATE_DISABLED; 1104 ring->mhi_chan_direction = direction; 1105 ring->mhi_chan_event_ring_index = event_ring_index; 1106 ring->num_elements = num_elements; 1107 1108 memset(ring->data, 0, sizeof(ring->data)); 1109 for (i = 0; i < ring->num_elements; i++) { 1110 struct qwx_xfer_data *xfer = &ring->data[i]; 1111 1112 err = bus_dmamap_create(sc->sc_dmat, QWX_PCI_XFER_MAX_DATA_SIZE, 1113 1, QWX_PCI_XFER_MAX_DATA_SIZE, 0, BUS_DMA_NOWAIT, 1114 &xfer->map); 1115 if (err) { 1116 printf("%s: could not create xfer DMA map\n", 1117 sc->sc_dev.dv_xname); 1118 goto fail; 1119 } 1120 1121 if (direction == MHI_CHAN_TYPE_INBOUND) { 1122 struct mbuf *m; 1123 1124 m = m_gethdr(M_DONTWAIT, MT_DATA); 1125 if (m == NULL) { 1126 err = ENOBUFS; 1127 goto fail; 1128 } 1129 1130 MCLGETL(m, M_DONTWAIT, QWX_PCI_XFER_MAX_DATA_SIZE); 1131 if ((m->m_flags & M_EXT) == 0) { 1132 m_freem(m); 1133 err = ENOBUFS; 1134 goto fail; 1135 } 1136 1137 m->m_len = m->m_pkthdr.len = QWX_PCI_XFER_MAX_DATA_SIZE; 1138 err = bus_dmamap_load_mbuf(sc->sc_dmat, xfer->map, 1139 m, BUS_DMA_READ | BUS_DMA_NOWAIT); 1140 if (err) { 1141 printf("%s: can't map mbuf (error %d)\n", 1142 sc->sc_dev.dv_xname, err); 1143 m_freem(m); 1144 goto fail; 1145 } 1146 1147 bus_dmamap_sync(sc->sc_dmat, xfer->map, 0, 1148 QWX_PCI_XFER_MAX_DATA_SIZE, BUS_DMASYNC_PREREAD); 1149 xfer->m = m; 1150 } 1151 } 1152 1153 return 0; 1154 fail: 1155 for (i = 0; i < ring->num_elements; i++) { 1156 struct qwx_xfer_data *xfer = &ring->data[i]; 1157 1158 if (xfer->map) { 1159 bus_dmamap_sync(sc->sc_dmat, xfer->map, 0, 1160 xfer->map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1161 bus_dmamap_unload(sc->sc_dmat, xfer->map); 1162 bus_dmamap_destroy(sc->sc_dmat, xfer->map); 1163 xfer->map = NULL; 1164 } 1165 1166 if (xfer->m) { 1167 m_freem(xfer->m); 1168 xfer->m = NULL; 1169 } 1170 } 1171 return 1; 1172 } 1173 1174 int 1175 qwx_pci_alloc_xfer_rings_qca6390(struct qwx_pci_softc *psc) 1176 { 1177 struct qwx_softc *sc = &psc->sc_sc; 1178 int ret; 1179 1180 ret = qwx_pci_alloc_xfer_ring(sc, 1181 &psc->xfer_rings[QWX_PCI_XFER_RING_LOOPBACK_OUTBOUND], 1182 0, MHI_CHAN_TYPE_OUTBOUND, 0, 32); 1183 if (ret) 1184 goto fail; 1185 1186 ret = qwx_pci_alloc_xfer_ring(sc, 1187 &psc->xfer_rings[QWX_PCI_XFER_RING_LOOPBACK_INBOUND], 1188 1, MHI_CHAN_TYPE_INBOUND, 0, 32); 1189 if (ret) 1190 goto fail; 1191 1192 ret = qwx_pci_alloc_xfer_ring(sc, 1193 &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_OUTBOUND], 1194 20, MHI_CHAN_TYPE_OUTBOUND, 1, 64); 1195 if (ret) 1196 goto fail; 1197 1198 ret = qwx_pci_alloc_xfer_ring(sc, 1199 &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_INBOUND], 1200 21, MHI_CHAN_TYPE_INBOUND, 1, 64); 1201 if (ret) 1202 goto fail; 1203 1204 return 0; 1205 fail: 1206 qwx_pci_free_xfer_rings(psc); 1207 return ret; 1208 } 1209 1210 int 1211 qwx_pci_alloc_xfer_rings_qcn9074(struct qwx_pci_softc *psc) 1212 { 1213 struct qwx_softc *sc = &psc->sc_sc; 1214 int ret; 1215 1216 ret = qwx_pci_alloc_xfer_ring(sc, 1217 &psc->xfer_rings[QWX_PCI_XFER_RING_LOOPBACK_OUTBOUND], 1218 0, MHI_CHAN_TYPE_OUTBOUND, 1, 32); 1219 if (ret) 1220 goto fail; 1221 1222 ret = qwx_pci_alloc_xfer_ring(sc, 1223 &psc->xfer_rings[QWX_PCI_XFER_RING_LOOPBACK_INBOUND], 1224 1, MHI_CHAN_TYPE_INBOUND, 1, 32); 1225 if (ret) 1226 goto fail; 1227 1228 ret = qwx_pci_alloc_xfer_ring(sc, 1229 &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_OUTBOUND], 
1230 20, MHI_CHAN_TYPE_OUTBOUND, 1, 32); 1231 if (ret) 1232 goto fail; 1233 1234 ret = qwx_pci_alloc_xfer_ring(sc, 1235 &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_INBOUND], 1236 21, MHI_CHAN_TYPE_INBOUND, 1, 32); 1237 if (ret) 1238 goto fail; 1239 1240 return 0; 1241 fail: 1242 qwx_pci_free_xfer_rings(psc); 1243 return ret; 1244 } 1245 1246 void 1247 qwx_pci_free_event_rings(struct qwx_pci_softc *psc) 1248 { 1249 struct qwx_softc *sc = &psc->sc_sc; 1250 int i; 1251 1252 for (i = 0; i < nitems(psc->event_rings); i++) { 1253 struct qwx_pci_event_ring *ring = &psc->event_rings[i]; 1254 if (ring->dmamem) { 1255 qwx_dmamem_free(sc->sc_dmat, ring->dmamem); 1256 ring->dmamem = NULL; 1257 } 1258 memset(ring, 0, sizeof(*ring)); 1259 } 1260 } 1261 1262 int 1263 qwx_pci_alloc_event_ring(struct qwx_softc *sc, struct qwx_pci_event_ring *ring, 1264 uint32_t type, uint32_t irq, uint32_t intmod, size_t num_elements) 1265 { 1266 bus_size_t size; 1267 1268 memset(ring, 0, sizeof(*ring)); 1269 1270 size = sizeof(struct qwx_mhi_ring_element) * num_elements; 1271 /* Hardware requires that rings are aligned to ring size. */ 1272 ring->dmamem = qwx_dmamem_alloc(sc->sc_dmat, size, size); 1273 if (ring->dmamem == NULL) 1274 return ENOMEM; 1275 1276 ring->size = size; 1277 ring->mhi_er_type = type; 1278 ring->mhi_er_irq = irq; 1279 ring->mhi_er_irq_moderation_ms = intmod; 1280 ring->num_elements = num_elements; 1281 return 0; 1282 } 1283 1284 int 1285 qwx_pci_alloc_event_rings(struct qwx_pci_softc *psc) 1286 { 1287 struct qwx_softc *sc = &psc->sc_sc; 1288 int ret; 1289 1290 ret = qwx_pci_alloc_event_ring(sc, &psc->event_rings[0], 1291 MHI_ER_CTRL, 0, 0, 32); 1292 if (ret) 1293 goto fail; 1294 1295 ret = qwx_pci_alloc_event_ring(sc, &psc->event_rings[1], 1296 MHI_ER_DATA, 0, 1, 256); 1297 if (ret) 1298 goto fail; 1299 1300 return 0; 1301 fail: 1302 qwx_pci_free_event_rings(psc); 1303 return ret; 1304 } 1305 1306 void 1307 qwx_pci_free_cmd_ring(struct qwx_pci_softc *psc) 1308 { 1309 struct qwx_softc *sc = &psc->sc_sc; 1310 struct qwx_pci_cmd_ring *ring = &psc->cmd_ring; 1311 1312 if (ring->dmamem) 1313 qwx_dmamem_free(sc->sc_dmat, ring->dmamem); 1314 1315 memset(ring, 0, sizeof(*ring)); 1316 } 1317 1318 int 1319 qwx_pci_init_cmd_ring(struct qwx_softc *sc, struct qwx_pci_cmd_ring *ring) 1320 { 1321 memset(ring, 0, sizeof(*ring)); 1322 1323 ring->num_elements = QWX_PCI_CMD_RING_MAX_ELEMENTS; 1324 ring->size = sizeof(struct qwx_mhi_ring_element) * ring->num_elements; 1325 1326 /* Hardware requires that rings are aligned to ring size. 
*/ 1327 ring->dmamem = qwx_dmamem_alloc(sc->sc_dmat, ring->size, ring->size); 1328 if (ring->dmamem == NULL) 1329 return ENOMEM; 1330 1331 return 0; 1332 } 1333 1334 uint32_t 1335 qwx_pci_read(struct qwx_softc *sc, uint32_t addr) 1336 { 1337 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; 1338 1339 return (bus_space_read_4(psc->sc_st, psc->sc_sh, addr)); 1340 } 1341 1342 void 1343 qwx_pci_write(struct qwx_softc *sc, uint32_t addr, uint32_t val) 1344 { 1345 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; 1346 1347 bus_space_write_4(psc->sc_st, psc->sc_sh, addr, val); 1348 } 1349 1350 void 1351 qwx_pci_read_hw_version(struct qwx_softc *sc, uint32_t *major, 1352 uint32_t *minor) 1353 { 1354 uint32_t soc_hw_version; 1355 1356 soc_hw_version = qwx_pcic_read32(sc, TCSR_SOC_HW_VERSION); 1357 *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK, soc_hw_version); 1358 *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK, soc_hw_version); 1359 DPRINTF("%s: pci tcsr_soc_hw_version major %d minor %d\n", 1360 sc->sc_dev.dv_xname, *major, *minor); 1361 } 1362 1363 uint32_t 1364 qwx_pcic_read32(struct qwx_softc *sc, uint32_t offset) 1365 { 1366 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; 1367 int ret = 0; 1368 uint32_t val; 1369 bool wakeup_required; 1370 1371 /* for offset beyond BAR + 4K - 32, may 1372 * need to wakeup the device to access. 1373 */ 1374 wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, sc->sc_flags) 1375 && offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF; 1376 if (wakeup_required && psc->sc_pci_ops->wakeup) 1377 ret = psc->sc_pci_ops->wakeup(sc); 1378 1379 if (offset < ATH11K_PCI_WINDOW_START) 1380 val = qwx_pci_read(sc, offset); 1381 else 1382 val = psc->sc_pci_ops->window_read32(sc, offset); 1383 1384 if (wakeup_required && !ret && psc->sc_pci_ops->release) 1385 psc->sc_pci_ops->release(sc); 1386 1387 return val; 1388 } 1389 1390 void 1391 qwx_pcic_write32(struct qwx_softc *sc, uint32_t offset, uint32_t value) 1392 { 1393 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; 1394 int ret = 0; 1395 bool wakeup_required; 1396 1397 /* for offset beyond BAR + 4K - 32, may 1398 * need to wakeup the device to access. 
1399 */ 1400 wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, sc->sc_flags) 1401 && offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF; 1402 if (wakeup_required && psc->sc_pci_ops->wakeup) 1403 ret = psc->sc_pci_ops->wakeup(sc); 1404 1405 if (offset < ATH11K_PCI_WINDOW_START) 1406 qwx_pci_write(sc, offset, value); 1407 else 1408 psc->sc_pci_ops->window_write32(sc, offset, value); 1409 1410 if (wakeup_required && !ret && psc->sc_pci_ops->release) 1411 psc->sc_pci_ops->release(sc); 1412 } 1413 1414 void 1415 qwx_pcic_ext_irq_disable(struct qwx_softc *sc) 1416 { 1417 clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, sc->sc_flags); 1418 1419 /* In case of one MSI vector, we handle irq enable/disable in a 1420 * uniform way since we only have one irq 1421 */ 1422 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) 1423 return; 1424 1425 printf("%s not implemented\n", __func__); 1426 } 1427 1428 void 1429 qwx_pcic_ext_irq_enable(struct qwx_softc *sc) 1430 { 1431 set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, sc->sc_flags); 1432 1433 /* In case of one MSI vector, we handle irq enable/disable in a 1434 * uniform way since we only have one irq 1435 */ 1436 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) 1437 return; 1438 1439 printf("%s not implemented\n", __func__); 1440 } 1441 1442 void 1443 qwx_pcic_ce_irq_enable(struct qwx_softc *sc, uint16_t ce_id) 1444 { 1445 /* In case of one MSI vector, we handle irq enable/disable in a 1446 * uniform way since we only have one irq 1447 */ 1448 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) 1449 return; 1450 1451 printf("%s not implemented\n", __func__); 1452 } 1453 1454 void 1455 qwx_pcic_ce_irq_disable(struct qwx_softc *sc, uint16_t ce_id) 1456 { 1457 /* In case of one MSI vector, we handle irq enable/disable in a 1458 * uniform way since we only have one irq 1459 */ 1460 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) 1461 return; 1462 1463 printf("%s not implemented\n", __func__); 1464 } 1465 1466 void 1467 qwx_pcic_ce_irqs_enable(struct qwx_softc *sc) 1468 { 1469 int i; 1470 1471 set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, sc->sc_flags); 1472 1473 for (i = 0; i < sc->hw_params.ce_count; i++) { 1474 if (qwx_ce_get_attr_flags(sc, i) & CE_ATTR_DIS_INTR) 1475 continue; 1476 qwx_pcic_ce_irq_enable(sc, i); 1477 } 1478 } 1479 1480 void 1481 qwx_pcic_ce_irqs_disable(struct qwx_softc *sc) 1482 { 1483 int i; 1484 1485 clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, sc->sc_flags); 1486 1487 for (i = 0; i < sc->hw_params.ce_count; i++) { 1488 if (qwx_ce_get_attr_flags(sc, i) & CE_ATTR_DIS_INTR) 1489 continue; 1490 qwx_pcic_ce_irq_disable(sc, i); 1491 } 1492 } 1493 1494 int 1495 qwx_pci_start(struct qwx_softc *sc) 1496 { 1497 /* TODO: for now don't restore ASPM in case of single MSI 1498 * vector as MHI register reading in M2 causes system hang. 
1499 */ 1500 if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) 1501 qwx_pci_aspm_restore(sc); 1502 else 1503 DPRINTF("%s: leaving PCI ASPM disabled to avoid MHI M2 problems" 1504 "\n", sc->sc_dev.dv_xname); 1505 1506 set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, sc->sc_flags); 1507 1508 qwx_ce_rx_post_buf(sc); 1509 qwx_pcic_ce_irqs_enable(sc); 1510 1511 return 0; 1512 } 1513 1514 void 1515 qwx_pcic_ce_irq_disable_sync(struct qwx_softc *sc) 1516 { 1517 qwx_pcic_ce_irqs_disable(sc); 1518 #if 0 1519 ath11k_pcic_sync_ce_irqs(ab); 1520 ath11k_pcic_kill_tasklets(ab); 1521 #endif 1522 } 1523 1524 void 1525 qwx_pci_stop(struct qwx_softc *sc) 1526 { 1527 qwx_pcic_ce_irq_disable_sync(sc); 1528 qwx_ce_cleanup_pipes(sc); 1529 } 1530 1531 int 1532 qwx_pci_bus_wake_up(struct qwx_softc *sc) 1533 { 1534 if (qwx_mhi_wake_db_clear_valid(sc)) 1535 qwx_mhi_device_wake(sc); 1536 1537 return 0; 1538 } 1539 1540 void 1541 qwx_pci_bus_release(struct qwx_softc *sc) 1542 { 1543 if (qwx_mhi_wake_db_clear_valid(sc)) 1544 qwx_mhi_device_zzz(sc); 1545 } 1546 1547 uint32_t 1548 qwx_pci_get_window_start(struct qwx_softc *sc, uint32_t offset) 1549 { 1550 if (!sc->hw_params.static_window_map) 1551 return ATH11K_PCI_WINDOW_START; 1552 1553 if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK) 1554 /* if offset lies within DP register range, use 3rd window */ 1555 return 3 * ATH11K_PCI_WINDOW_START; 1556 else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc)) < 1557 ATH11K_PCI_WINDOW_RANGE_MASK) 1558 /* if offset lies within CE register range, use 2nd window */ 1559 return 2 * ATH11K_PCI_WINDOW_START; 1560 else 1561 return ATH11K_PCI_WINDOW_START; 1562 } 1563 1564 void 1565 qwx_pci_select_window(struct qwx_softc *sc, uint32_t offset) 1566 { 1567 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; 1568 uint32_t window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset); 1569 1570 #if notyet 1571 lockdep_assert_held(&ab_pci->window_lock); 1572 #endif 1573 1574 if (window != psc->register_window) { 1575 qwx_pci_write(sc, ATH11K_PCI_WINDOW_REG_ADDRESS, 1576 ATH11K_PCI_WINDOW_ENABLE_BIT | window); 1577 (void) qwx_pci_read(sc, ATH11K_PCI_WINDOW_REG_ADDRESS); 1578 psc->register_window = window; 1579 } 1580 } 1581 1582 void 1583 qwx_pci_window_write32(struct qwx_softc *sc, uint32_t offset, uint32_t value) 1584 { 1585 uint32_t window_start; 1586 1587 window_start = qwx_pci_get_window_start(sc, offset); 1588 1589 if (window_start == ATH11K_PCI_WINDOW_START) { 1590 #if notyet 1591 spin_lock_bh(&ab_pci->window_lock); 1592 #endif 1593 qwx_pci_select_window(sc, offset); 1594 qwx_pci_write(sc, window_start + 1595 (offset & ATH11K_PCI_WINDOW_RANGE_MASK), value); 1596 #if notyet 1597 spin_unlock_bh(&ab_pci->window_lock); 1598 #endif 1599 } else { 1600 qwx_pci_write(sc, window_start + 1601 (offset & ATH11K_PCI_WINDOW_RANGE_MASK), value); 1602 } 1603 } 1604 1605 uint32_t 1606 qwx_pci_window_read32(struct qwx_softc *sc, uint32_t offset) 1607 { 1608 uint32_t window_start, val; 1609 1610 window_start = qwx_pci_get_window_start(sc, offset); 1611 1612 if (window_start == ATH11K_PCI_WINDOW_START) { 1613 #if notyet 1614 spin_lock_bh(&ab_pci->window_lock); 1615 #endif 1616 qwx_pci_select_window(sc, offset); 1617 val = qwx_pci_read(sc, window_start + 1618 (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); 1619 #if notyet 1620 spin_unlock_bh(&ab_pci->window_lock); 1621 #endif 1622 } else { 1623 val = qwx_pci_read(sc, window_start + 1624 (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); 1625 } 1626 1627 return val; 1628 } 1629 1630 void 1631 
qwx_pci_select_static_window(struct qwx_softc *sc) 1632 { 1633 uint32_t umac_window; 1634 uint32_t ce_window; 1635 uint32_t window; 1636 1637 umac_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET); 1638 ce_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE); 1639 window = (umac_window << 12) | (ce_window << 6); 1640 1641 qwx_pci_write(sc, ATH11K_PCI_WINDOW_REG_ADDRESS, 1642 ATH11K_PCI_WINDOW_ENABLE_BIT | window); 1643 } 1644 1645 void 1646 qwx_pci_soc_global_reset(struct qwx_softc *sc) 1647 { 1648 uint32_t val, msecs; 1649 1650 val = qwx_pcic_read32(sc, PCIE_SOC_GLOBAL_RESET); 1651 1652 val |= PCIE_SOC_GLOBAL_RESET_V; 1653 1654 qwx_pcic_write32(sc, PCIE_SOC_GLOBAL_RESET, val); 1655 1656 /* TODO: exact time to sleep is uncertain */ 1657 msecs = 10; 1658 DELAY(msecs * 1000); 1659 1660 /* Need to toggle V bit back otherwise stuck in reset status */ 1661 val &= ~PCIE_SOC_GLOBAL_RESET_V; 1662 1663 qwx_pcic_write32(sc, PCIE_SOC_GLOBAL_RESET, val); 1664 1665 DELAY(msecs * 1000); 1666 1667 val = qwx_pcic_read32(sc, PCIE_SOC_GLOBAL_RESET); 1668 if (val == 0xffffffff) 1669 printf("%s: link down error during global reset\n", 1670 sc->sc_dev.dv_xname); 1671 } 1672 1673 void 1674 qwx_pci_clear_dbg_registers(struct qwx_softc *sc) 1675 { 1676 uint32_t val; 1677 1678 /* read cookie */ 1679 val = qwx_pcic_read32(sc, PCIE_Q6_COOKIE_ADDR); 1680 DPRINTF("%s: cookie:0x%x\n", sc->sc_dev.dv_xname, val); 1681 1682 val = qwx_pcic_read32(sc, WLAON_WARM_SW_ENTRY); 1683 DPRINTF("%s: WLAON_WARM_SW_ENTRY 0x%x\n", sc->sc_dev.dv_xname, val); 1684 1685 /* TODO: exact time to sleep is uncertain */ 1686 DELAY(10 * 1000); 1687 1688 /* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from 1689 * continuing warm path and entering dead loop. 1690 */ 1691 qwx_pcic_write32(sc, WLAON_WARM_SW_ENTRY, 0); 1692 DELAY(10 * 1000); 1693 1694 val = qwx_pcic_read32(sc, WLAON_WARM_SW_ENTRY); 1695 DPRINTF("%s: WLAON_WARM_SW_ENTRY 0x%x\n", sc->sc_dev.dv_xname, val); 1696 1697 /* A read clear register. clear the register to prevent 1698 * Q6 from entering wrong code path. 
1699 */ 1700 val = qwx_pcic_read32(sc, WLAON_SOC_RESET_CAUSE_REG); 1701 DPRINTF("%s: soc reset cause:%d\n", sc->sc_dev.dv_xname, val); 1702 } 1703 1704 int 1705 qwx_pci_set_link_reg(struct qwx_softc *sc, uint32_t offset, uint32_t value, 1706 uint32_t mask) 1707 { 1708 uint32_t v; 1709 int i; 1710 1711 v = qwx_pcic_read32(sc, offset); 1712 if ((v & mask) == value) 1713 return 0; 1714 1715 for (i = 0; i < 10; i++) { 1716 qwx_pcic_write32(sc, offset, (v & ~mask) | value); 1717 1718 v = qwx_pcic_read32(sc, offset); 1719 if ((v & mask) == value) 1720 return 0; 1721 1722 delay((2 * 1000)); 1723 } 1724 1725 DPRINTF("failed to set pcie link register 0x%08x: 0x%08x != 0x%08x\n", 1726 offset, v & mask, value); 1727 1728 return ETIMEDOUT; 1729 } 1730 1731 int 1732 qwx_pci_fix_l1ss(struct qwx_softc *sc) 1733 { 1734 int ret; 1735 1736 ret = qwx_pci_set_link_reg(sc, 1737 PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(sc), 1738 PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL, 1739 PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK); 1740 if (ret) { 1741 DPRINTF("failed to set sysclk: %d\n", ret); 1742 return ret; 1743 } 1744 1745 ret = qwx_pci_set_link_reg(sc, 1746 PCIE_PCS_OSC_DTCT_CONFIG1_REG(sc), 1747 PCIE_PCS_OSC_DTCT_CONFIG1_VAL, 1748 PCIE_PCS_OSC_DTCT_CONFIG_MSK); 1749 if (ret) { 1750 DPRINTF("failed to set dtct config1 error: %d\n", ret); 1751 return ret; 1752 } 1753 1754 ret = qwx_pci_set_link_reg(sc, 1755 PCIE_PCS_OSC_DTCT_CONFIG2_REG(sc), 1756 PCIE_PCS_OSC_DTCT_CONFIG2_VAL, 1757 PCIE_PCS_OSC_DTCT_CONFIG_MSK); 1758 if (ret) { 1759 DPRINTF("failed to set dtct config2: %d\n", ret); 1760 return ret; 1761 } 1762 1763 ret = qwx_pci_set_link_reg(sc, 1764 PCIE_PCS_OSC_DTCT_CONFIG4_REG(sc), 1765 PCIE_PCS_OSC_DTCT_CONFIG4_VAL, 1766 PCIE_PCS_OSC_DTCT_CONFIG_MSK); 1767 if (ret) { 1768 DPRINTF("failed to set dtct config4: %d\n", ret); 1769 return ret; 1770 } 1771 1772 return 0; 1773 } 1774 1775 void 1776 qwx_pci_enable_ltssm(struct qwx_softc *sc) 1777 { 1778 uint32_t val; 1779 int i; 1780 1781 val = qwx_pcic_read32(sc, PCIE_PCIE_PARF_LTSSM); 1782 1783 /* PCIE link seems very unstable after the Hot Reset*/ 1784 for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) { 1785 if (val == 0xffffffff) 1786 DELAY(5 * 1000); 1787 1788 qwx_pcic_write32(sc, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE); 1789 val = qwx_pcic_read32(sc, PCIE_PCIE_PARF_LTSSM); 1790 } 1791 1792 DPRINTF("%s: pci ltssm 0x%x\n", sc->sc_dev.dv_xname, val); 1793 1794 val = qwx_pcic_read32(sc, GCC_GCC_PCIE_HOT_RST); 1795 val |= GCC_GCC_PCIE_HOT_RST_VAL; 1796 qwx_pcic_write32(sc, GCC_GCC_PCIE_HOT_RST, val); 1797 val = qwx_pcic_read32(sc, GCC_GCC_PCIE_HOT_RST); 1798 1799 DPRINTF("%s: pci pcie_hot_rst 0x%x\n", sc->sc_dev.dv_xname, val); 1800 1801 DELAY(5 * 1000); 1802 } 1803 1804 void 1805 qwx_pci_clear_all_intrs(struct qwx_softc *sc) 1806 { 1807 /* This is a WAR for PCIE Hotreset. 1808 * When target receive Hotreset, but will set the interrupt. 1809 * So when download SBL again, SBL will open Interrupt and 1810 * receive it, and crash immediately. 
1811 */ 1812 qwx_pcic_write32(sc, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL); 1813 } 1814 1815 void 1816 qwx_pci_set_wlaon_pwr_ctrl(struct qwx_softc *sc) 1817 { 1818 uint32_t val; 1819 1820 val = qwx_pcic_read32(sc, WLAON_QFPROM_PWR_CTRL_REG); 1821 val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK; 1822 qwx_pcic_write32(sc, WLAON_QFPROM_PWR_CTRL_REG, val); 1823 } 1824 1825 void 1826 qwx_pci_force_wake(struct qwx_softc *sc) 1827 { 1828 qwx_pcic_write32(sc, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1); 1829 DELAY(5 * 1000); 1830 } 1831 1832 void 1833 qwx_pci_sw_reset(struct qwx_softc *sc, bool power_on) 1834 { 1835 DELAY(100 * 1000); /* msecs */ 1836 1837 if (power_on) { 1838 qwx_pci_enable_ltssm(sc); 1839 qwx_pci_clear_all_intrs(sc); 1840 qwx_pci_set_wlaon_pwr_ctrl(sc); 1841 if (sc->hw_params.fix_l1ss) 1842 qwx_pci_fix_l1ss(sc); 1843 } 1844 1845 qwx_mhi_clear_vector(sc); 1846 qwx_pci_clear_dbg_registers(sc); 1847 qwx_pci_soc_global_reset(sc); 1848 qwx_mhi_reset_device(sc, 0); 1849 } 1850 1851 void 1852 qwx_pci_msi_config(struct qwx_softc *sc, bool enable) 1853 { 1854 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; 1855 uint32_t val; 1856 1857 val = pci_conf_read(psc->sc_pc, psc->sc_tag, 1858 psc->sc_msi_off + PCI_MSI_MC); 1859 1860 if (enable) 1861 val |= PCI_MSI_MC_MSIE; 1862 else 1863 val &= ~PCI_MSI_MC_MSIE; 1864 1865 pci_conf_write(psc->sc_pc, psc->sc_tag, psc->sc_msi_off + PCI_MSI_MC, 1866 val); 1867 } 1868 1869 void 1870 qwx_pci_msi_enable(struct qwx_softc *sc) 1871 { 1872 qwx_pci_msi_config(sc, true); 1873 } 1874 1875 void 1876 qwx_pci_msi_disable(struct qwx_softc *sc) 1877 { 1878 qwx_pci_msi_config(sc, false); 1879 } 1880 1881 void 1882 qwx_pci_aspm_disable(struct qwx_softc *sc) 1883 { 1884 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; 1885 1886 psc->sc_lcsr = pci_conf_read(psc->sc_pc, psc->sc_tag, 1887 psc->sc_cap_off + PCI_PCIE_LCSR); 1888 1889 DPRINTF("%s: pci link_ctl 0x%04x L0s %d L1 %d\n", sc->sc_dev.dv_xname, 1890 (uint16_t)psc->sc_lcsr, (psc->sc_lcsr & PCI_PCIE_LCSR_ASPM_L0S), 1891 (psc->sc_lcsr & PCI_PCIE_LCSR_ASPM_L1)); 1892 1893 /* disable L0s and L1 */ 1894 pci_conf_write(psc->sc_pc, psc->sc_tag, psc->sc_cap_off + PCI_PCIE_LCSR, 1895 psc->sc_lcsr & ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1)); 1896 1897 psc->sc_flags |= ATH11K_PCI_ASPM_RESTORE; 1898 } 1899 1900 void 1901 qwx_pci_aspm_restore(struct qwx_softc *sc) 1902 { 1903 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; 1904 1905 if (psc->sc_flags & ATH11K_PCI_ASPM_RESTORE) { 1906 pci_conf_write(psc->sc_pc, psc->sc_tag, 1907 psc->sc_cap_off + PCI_PCIE_LCSR, psc->sc_lcsr); 1908 psc->sc_flags &= ~ATH11K_PCI_ASPM_RESTORE; 1909 } 1910 } 1911 1912 int 1913 qwx_pci_power_up(struct qwx_softc *sc) 1914 { 1915 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; 1916 int error; 1917 1918 psc->register_window = 0; 1919 clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, sc->sc_flags); 1920 1921 qwx_pci_sw_reset(sc, true); 1922 1923 /* Disable ASPM during firmware download due to problems switching 1924 * to AMSS state. 
1925 */ 1926 qwx_pci_aspm_disable(sc); 1927 1928 qwx_pci_msi_enable(sc); 1929 1930 error = qwx_mhi_start(psc); 1931 if (error) 1932 return error; 1933 1934 if (sc->hw_params.static_window_map) 1935 qwx_pci_select_static_window(sc); 1936 1937 return 0; 1938 } 1939 1940 void 1941 qwx_pci_power_down(struct qwx_softc *sc) 1942 { 1943 /* restore aspm in case firmware bootup fails */ 1944 qwx_pci_aspm_restore(sc); 1945 1946 qwx_pci_force_wake(sc); 1947 1948 qwx_pci_msi_disable(sc); 1949 1950 qwx_mhi_stop(sc); 1951 clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, sc->sc_flags); 1952 qwx_pci_sw_reset(sc, false); 1953 } 1954 1955 /* 1956 * MHI 1957 */ 1958 int 1959 qwx_mhi_register(struct qwx_softc *sc) 1960 { 1961 DNPRINTF(QWX_D_MHI, "%s: STUB %s()\n", sc->sc_dev.dv_xname, __func__); 1962 return 0; 1963 } 1964 1965 void 1966 qwx_mhi_unregister(struct qwx_softc *sc) 1967 { 1968 DNPRINTF(QWX_D_MHI, "%s: STUB %s()\n", sc->sc_dev.dv_xname, __func__); 1969 } 1970 1971 // XXX MHI is GPLd - we provide a compatible bare-bones implementation 1972 #define MHI_CFG 0x10 1973 #define MHI_CFG_NHWER_MASK GENMASK(31, 24) 1974 #define MHI_CFG_NHWER_SHFT 24 1975 #define MHI_CFG_NER_MASK GENMASK(23, 16) 1976 #define MHI_CFG_NER_SHFT 16 1977 #define MHI_CFG_NHWCH_MASK GENMASK(15, 8) 1978 #define MHI_CFG_NHWCH_SHFT 8 1979 #define MHI_CFG_NCH_MASK GENMASK(7, 0) 1980 #define MHI_CHDBOFF 0x18 1981 #define MHI_DEV_WAKE_DB 127 1982 #define MHI_ERDBOFF 0x20 1983 #define MHI_BHI_OFFSET 0x28 1984 #define MHI_BHI_IMGADDR_LOW 0x08 1985 #define MHI_BHI_IMGADDR_HIGH 0x0c 1986 #define MHI_BHI_IMGSIZE 0x10 1987 #define MHI_BHI_IMGTXDB 0x18 1988 #define MHI_BHI_INTVEC 0x20 1989 #define MHI_BHI_EXECENV 0x28 1990 #define MHI_BHI_STATUS 0x2c 1991 #define MHI_BHI_SERIALNU 0x40 1992 #define MHI_BHIE_OFFSET 0x2c 1993 #define MHI_BHIE_TXVECADDR_LOW_OFFS 0x2c 1994 #define MHI_BHIE_TXVECADDR_HIGH_OFFS 0x30 1995 #define MHI_BHIE_TXVECSIZE_OFFS 0x34 1996 #define MHI_BHIE_TXVECDB_OFFS 0x3c 1997 #define MHI_BHIE_TXVECSTATUS_OFFS 0x44 1998 #define MHI_BHIE_RXVECADDR_LOW_OFFS 0x60 1999 #define MHI_BHIE_RXVECSTATUS_OFFS 0x78 2000 #define MHI_CTRL 0x38 2001 #define MHI_CTRL_READY_MASK 0x1 2002 #define MHI_CTRL_RESET_MASK 0x2 2003 #define MHI_CTRL_MHISTATE_MASK GENMASK(15, 8) 2004 #define MHI_CTRL_MHISTATE_SHFT 8 2005 #define MHI_STATUS 0x48 2006 #define MHI_STATUS_MHISTATE_MASK GENMASK(15, 8) 2007 #define MHI_STATUS_MHISTATE_SHFT 8 2008 #define MHI_STATE_RESET 0x0 2009 #define MHI_STATE_READY 0x1 2010 #define MHI_STATE_M0 0x2 2011 #define MHI_STATE_M1 0x3 2012 #define MHI_STATE_M2 0x4 2013 #define MHI_STATE_M3 0x5 2014 #define MHI_STATE_M3_FAST 0x6 2015 #define MHI_STATE_BHI 0x7 2016 #define MHI_STATE_SYS_ERR 0xff 2017 #define MHI_STATUS_READY_MASK 0x1 2018 #define MHI_STATUS_SYSERR_MASK 0x4 2019 #define MHI_CCABAP_LOWER 0x58 2020 #define MHI_CCABAP_HIGHER 0x5c 2021 #define MHI_ECABAP_LOWER 0x60 2022 #define MHI_ECABAP_HIGHER 0x64 2023 #define MHI_CRCBAP_LOWER 0x68 2024 #define MHI_CRCBAP_HIGHER 0x6c 2025 #define MHI_CRDB_LOWER 0x70 2026 #define MHI_CRDB_HIGHER 0x74 2027 #define MHI_CTRLBASE_LOWER 0x80 2028 #define MHI_CTRLBASE_HIGHER 0x84 2029 #define MHI_CTRLLIMIT_LOWER 0x88 2030 #define MHI_CTRLLIMIT_HIGHER 0x8c 2031 #define MHI_DATABASE_LOWER 0x98 2032 #define MHI_DATABASE_HIGHER 0x9c 2033 #define MHI_DATALIMIT_LOWER 0xa0 2034 #define MHI_DATALIMIT_HIGHER 0xa4 2035 2036 #define MHI_EE_PBL 0x0 /* Primary Bootloader */ 2037 #define MHI_EE_SBL 0x1 /* Secondary Bootloader */ 2038 #define MHI_EE_AMSS 0x2 /* Modem, aka the primary runtime EE */ 2039 #define 
MHI_EE_RDDM 0x3 /* Ram dump download mode */ 2040 #define MHI_EE_WFW 0x4 /* WLAN firmware mode */ 2041 #define MHI_EE_PTHRU 0x5 /* Passthrough */ 2042 #define MHI_EE_EDL 0x6 /* Embedded downloader */ 2043 #define MHI_EE_FP 0x7 /* Flash Programmer Environment */ 2044 2045 #define MHI_IN_PBL(e) (e == MHI_EE_PBL || e == MHI_EE_PTHRU || e == MHI_EE_EDL) 2046 #define MHI_POWER_UP_CAPABLE(e) (MHI_IN_PBL(e) || e == MHI_EE_AMSS) 2047 #define MHI_IN_MISSION_MODE(e) \ 2048 (e == MHI_EE_AMSS || e == MHI_EE_WFW || e == MHI_EE_FP) 2049 2050 /* BHI register bits */ 2051 #define MHI_BHI_TXDB_SEQNUM_BMSK GENMASK(29, 0) 2052 #define MHI_BHI_TXDB_SEQNUM_SHFT 0 2053 #define MHI_BHI_STATUS_MASK GENMASK(31, 30) 2054 #define MHI_BHI_STATUS_SHFT 30 2055 #define MHI_BHI_STATUS_ERROR 0x03 2056 #define MHI_BHI_STATUS_SUCCESS 0x02 2057 #define MHI_BHI_STATUS_RESET 0x00 2058 2059 /* MHI BHIE registers */ 2060 #define MHI_BHIE_MSMSOCID_OFFS 0x00 2061 #define MHI_BHIE_RXVECADDR_LOW_OFFS 0x60 2062 #define MHI_BHIE_RXVECADDR_HIGH_OFFS 0x64 2063 #define MHI_BHIE_RXVECSIZE_OFFS 0x68 2064 #define MHI_BHIE_RXVECDB_OFFS 0x70 2065 #define MHI_BHIE_RXVECSTATUS_OFFS 0x78 2066 2067 /* BHIE register bits */ 2068 #define MHI_BHIE_TXVECDB_SEQNUM_BMSK GENMASK(29, 0) 2069 #define MHI_BHIE_TXVECDB_SEQNUM_SHFT 0 2070 #define MHI_BHIE_TXVECSTATUS_SEQNUM_BMSK GENMASK(29, 0) 2071 #define MHI_BHIE_TXVECSTATUS_SEQNUM_SHFT 0 2072 #define MHI_BHIE_TXVECSTATUS_STATUS_BMSK GENMASK(31, 30) 2073 #define MHI_BHIE_TXVECSTATUS_STATUS_SHFT 30 2074 #define MHI_BHIE_TXVECSTATUS_STATUS_RESET 0x00 2075 #define MHI_BHIE_TXVECSTATUS_STATUS_XFER_COMPL 0x02 2076 #define MHI_BHIE_TXVECSTATUS_STATUS_ERROR 0x03 2077 #define MHI_BHIE_RXVECDB_SEQNUM_BMSK GENMASK(29, 0) 2078 #define MHI_BHIE_RXVECDB_SEQNUM_SHFT 0 2079 #define MHI_BHIE_RXVECSTATUS_SEQNUM_BMSK GENMASK(29, 0) 2080 #define MHI_BHIE_RXVECSTATUS_SEQNUM_SHFT 0 2081 #define MHI_BHIE_RXVECSTATUS_STATUS_BMSK GENMASK(31, 30) 2082 #define MHI_BHIE_RXVECSTATUS_STATUS_SHFT 30 2083 #define MHI_BHIE_RXVECSTATUS_STATUS_RESET 0x00 2084 #define MHI_BHIE_RXVECSTATUS_STATUS_XFER_COMPL 0x02 2085 #define MHI_BHIE_RXVECSTATUS_STATUS_ERROR 0x03 2086 2087 #define MHI_EV_CC_INVALID 0x0 2088 #define MHI_EV_CC_SUCCESS 0x1 2089 #define MHI_EV_CC_EOT 0x2 2090 #define MHI_EV_CC_OVERFLOW 0x3 2091 #define MHI_EV_CC_EOB 0x4 2092 #define MHI_EV_CC_OOB 0x5 2093 #define MHI_EV_CC_DB_MODE 0x6 2094 #define MHI_EV_CC_UNDEFINED_ERR 0x10 2095 #define MHI_EV_CC_BAD_TRE 0x11 2096 2097 #define MHI_CMD_NOP 01 2098 #define MHI_CMD_RESET_CHAN 16 2099 #define MHI_CMD_STOP_CHAN 17 2100 #define MHI_CMD_START_CHAN 18 2101 2102 #define MHI_TRE_CMD_CHID_MASK GENMASK(31, 24) 2103 #define MHI_TRE_CMD_CHID_SHFT 24 2104 #define MHI_TRE_CMD_CMDID_MASK GENMASK(23, 16) 2105 #define MHI_TRE_CMD_CMDID_SHFT 16 2106 2107 #define MHI_TRE0_EV_LEN_MASK GENMASK(15, 0) 2108 #define MHI_TRE0_EV_LEN_SHFT 0 2109 #define MHI_TRE0_EV_CODE_MASK GENMASK(31, 24) 2110 #define MHI_TRE0_EV_CODE_SHFT 24 2111 #define MHI_TRE1_EV_TYPE_MASK GENMASK(23, 16) 2112 #define MHI_TRE1_EV_TYPE_SHFT 16 2113 #define MHI_TRE1_EV_CHID_MASK GENMASK(31, 24) 2114 #define MHI_TRE1_EV_CHID_SHFT 24 2115 2116 #define MHI_TRE0_DATA_LEN_MASK GENMASK(15, 0) 2117 #define MHI_TRE0_DATA_LEN_SHFT 0 2118 #define MHI_TRE1_DATA_CHAIN (1 << 0) 2119 #define MHI_TRE1_DATA_IEOB (1 << 8) 2120 #define MHI_TRE1_DATA_IEOT (1 << 9) 2121 #define MHI_TRE1_DATA_BEI (1 << 10) 2122 #define MHI_TRE1_DATA_TYPE_MASK GENMASK(23, 16) 2123 #define MHI_TRE1_DATA_TYPE_SHIFT 16 2124 #define MHI_TRE1_DATA_TYPE_TRANSFER 0x2 2125 2126 
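/*
 * MHI ring element ("TRE") types. These values are carried in the type
 * field of a TRE. For example (illustrative only), the control event
 * handler below decodes the type of an event ring element as
 *
 *	type = (le32toh(e->dword[1]) & MHI_TRE1_EV_TYPE_MASK) >>
 *	    MHI_TRE1_EV_TYPE_SHFT;
 *
 * and compares it against these MHI_PKT_TYPE_* values.
 */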
#define MHI_PKT_TYPE_INVALID 0x00 2127 #define MHI_PKT_TYPE_NOOP_CMD 0x01 2128 #define MHI_PKT_TYPE_TRANSFER 0x02 2129 #define MHI_PKT_TYPE_COALESCING 0x08 2130 #define MHI_PKT_TYPE_RESET_CHAN_CMD 0x10 2131 #define MHI_PKT_TYPE_STOP_CHAN_CMD 0x11 2132 #define MHI_PKT_TYPE_START_CHAN_CMD 0x12 2133 #define MHI_PKT_TYPE_STATE_CHANGE_EVENT 0x20 2134 #define MHI_PKT_TYPE_CMD_COMPLETION_EVENT 0x21 2135 #define MHI_PKT_TYPE_TX_EVENT 0x22 2136 #define MHI_PKT_TYPE_RSC_TX_EVENT 0x28 2137 #define MHI_PKT_TYPE_EE_EVENT 0x40 2138 #define MHI_PKT_TYPE_TSYNC_EVENT 0x48 2139 #define MHI_PKT_TYPE_BW_REQ_EVENT 0x50 2140 2141 2142 #define MHI_DMA_VEC_CHUNK_SIZE 524288 /* 512 KB */ 2143 struct qwx_dma_vec_entry { 2144 uint64_t paddr; 2145 uint64_t size; 2146 }; 2147 2148 void 2149 qwx_mhi_ring_doorbell(struct qwx_softc *sc, uint64_t db_addr, uint64_t val) 2150 { 2151 qwx_pci_write(sc, db_addr + 4, val >> 32); 2152 qwx_pci_write(sc, db_addr, val & 0xffffffff); 2153 } 2154 2155 void 2156 qwx_mhi_device_wake(struct qwx_softc *sc) 2157 { 2158 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; 2159 2160 /* 2161 * Device wake is async only for now because we do not 2162 * keep track of PM state in software. 2163 */ 2164 qwx_mhi_ring_doorbell(sc, psc->wake_db, 1); 2165 } 2166 2167 void 2168 qwx_mhi_device_zzz(struct qwx_softc *sc) 2169 { 2170 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; 2171 2172 qwx_mhi_ring_doorbell(sc, psc->wake_db, 0); 2173 } 2174 2175 int 2176 qwx_mhi_wake_db_clear_valid(struct qwx_softc *sc) 2177 { 2178 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; 2179 2180 return (psc->mhi_state == MHI_STATE_M0); /* TODO other states? */ 2181 } 2182 2183 void 2184 qwx_mhi_init_xfer_rings(struct qwx_pci_softc *psc) 2185 { 2186 struct qwx_softc *sc = &psc->sc_sc; 2187 int i; 2188 uint32_t chcfg; 2189 struct qwx_pci_xfer_ring *ring; 2190 struct qwx_mhi_chan_ctxt *cbase, *c; 2191 2192 cbase = (struct qwx_mhi_chan_ctxt *)QWX_DMA_KVA(psc->chan_ctxt); 2193 for (i = 0; i < psc->max_chan; i++) { 2194 c = &cbase[i]; 2195 chcfg = le32toh(c->chcfg); 2196 chcfg &= ~(MHI_CHAN_CTX_CHSTATE_MASK | 2197 MHI_CHAN_CTX_BRSTMODE_MASK | 2198 MHI_CHAN_CTX_POLLCFG_MASK); 2199 chcfg |= (MHI_CHAN_CTX_CHSTATE_DISABLED | 2200 (MHI_CHAN_CTX_BRSTMODE_DISABLE << 2201 MHI_CHAN_CTX_BRSTMODE_SHFT)); 2202 c->chcfg = htole32(chcfg); 2203 c->chtype = htole32(MHI_CHAN_TYPE_INVALID); 2204 c->erindex = 0; 2205 } 2206 2207 for (i = 0; i < nitems(psc->xfer_rings); i++) { 2208 ring = &psc->xfer_rings[i]; 2209 KASSERT(ring->mhi_chan_id < psc->max_chan); 2210 c = &cbase[ring->mhi_chan_id]; 2211 c->chtype = htole32(ring->mhi_chan_direction); 2212 c->erindex = htole32(ring->mhi_chan_event_ring_index); 2213 ring->chan_ctxt = c; 2214 } 2215 2216 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->chan_ctxt), 0, 2217 QWX_DMA_LEN(psc->chan_ctxt), BUS_DMASYNC_PREWRITE); 2218 } 2219 2220 void 2221 qwx_mhi_init_event_rings(struct qwx_pci_softc *psc) 2222 { 2223 struct qwx_softc *sc = &psc->sc_sc; 2224 int i; 2225 uint32_t intmod; 2226 uint64_t paddr, len; 2227 struct qwx_pci_event_ring *ring; 2228 struct qwx_mhi_event_ctxt *c; 2229 2230 c = (struct qwx_mhi_event_ctxt *)QWX_DMA_KVA(psc->event_ctxt); 2231 for (i = 0; i < nitems(psc->event_rings); i++, c++) { 2232 ring = &psc->event_rings[i]; 2233 2234 ring->event_ctxt = c; 2235 2236 intmod = le32toh(c->intmod); 2237 intmod &= ~(MHI_EV_CTX_INTMODC_MASK | MHI_EV_CTX_INTMODT_MASK); 2238 intmod |= (ring->mhi_er_irq_moderation_ms << 2239 MHI_EV_CTX_INTMODT_SHFT) & MHI_EV_CTX_INTMODT_MASK; 2240 
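		/* INTMODC is left at zero; only the moderation timer is programmed. */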
c->intmod = htole32(intmod); 2241 2242 c->ertype = htole32(MHI_ER_TYPE_VALID); 2243 c->msivec = htole32(ring->mhi_er_irq); 2244 2245 paddr = QWX_DMA_DVA(ring->dmamem); 2246 ring->rp = paddr; 2247 ring->wp = paddr + ring->size - 2248 sizeof(struct qwx_mhi_ring_element); 2249 c->rbase = htole64(paddr); 2250 c->rp = htole64(ring->rp); 2251 c->wp = htole64(ring->wp); 2252 2253 len = sizeof(struct qwx_mhi_ring_element) * ring->num_elements; 2254 c->rlen = htole64(len); 2255 } 2256 2257 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->event_ctxt), 0, 2258 QWX_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_PREWRITE); 2259 } 2260 2261 void 2262 qwx_mhi_init_cmd_ring(struct qwx_pci_softc *psc) 2263 { 2264 struct qwx_softc *sc = &psc->sc_sc; 2265 struct qwx_pci_cmd_ring *ring = &psc->cmd_ring; 2266 struct qwx_mhi_cmd_ctxt *c; 2267 uint64_t paddr, len; 2268 2269 paddr = QWX_DMA_DVA(ring->dmamem); 2270 len = ring->size; 2271 2272 ring->rp = ring->wp = paddr; 2273 2274 c = (struct qwx_mhi_cmd_ctxt *)QWX_DMA_KVA(psc->cmd_ctxt); 2275 c->rbase = htole64(paddr); 2276 c->rp = htole64(paddr); 2277 c->wp = htole64(paddr); 2278 c->rlen = htole64(len); 2279 2280 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->cmd_ctxt), 0, 2281 QWX_DMA_LEN(psc->cmd_ctxt), BUS_DMASYNC_PREWRITE); 2282 } 2283 2284 void 2285 qwx_mhi_init_dev_ctxt(struct qwx_pci_softc *psc) 2286 { 2287 qwx_mhi_init_xfer_rings(psc); 2288 qwx_mhi_init_event_rings(psc); 2289 qwx_mhi_init_cmd_ring(psc); 2290 } 2291 2292 void * 2293 qwx_pci_cmd_ring_get_elem(struct qwx_pci_cmd_ring *ring, uint64_t ptr) 2294 { 2295 uint64_t base = QWX_DMA_DVA(ring->dmamem), offset; 2296 2297 if (ptr < base || ptr >= base + ring->size) 2298 return NULL; 2299 2300 offset = ptr - base; 2301 if (offset >= ring->size) 2302 return NULL; 2303 2304 return QWX_DMA_KVA(ring->dmamem) + offset; 2305 } 2306 2307 int 2308 qwx_mhi_cmd_ring_submit(struct qwx_pci_softc *psc, 2309 struct qwx_pci_cmd_ring *ring) 2310 { 2311 struct qwx_softc *sc = &psc->sc_sc; 2312 uint64_t base = QWX_DMA_DVA(ring->dmamem); 2313 struct qwx_mhi_cmd_ctxt *c; 2314 2315 if (ring->queued >= ring->num_elements) 2316 return 1; 2317 2318 if (ring->wp + sizeof(struct qwx_mhi_ring_element) >= base + ring->size) 2319 ring->wp = base; 2320 else 2321 ring->wp += sizeof(struct qwx_mhi_ring_element); 2322 2323 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->cmd_ctxt), 0, 2324 QWX_DMA_LEN(psc->cmd_ctxt), BUS_DMASYNC_POSTREAD); 2325 2326 c = (struct qwx_mhi_cmd_ctxt *)QWX_DMA_KVA(psc->cmd_ctxt); 2327 c->wp = htole64(ring->wp); 2328 2329 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->cmd_ctxt), 0, 2330 QWX_DMA_LEN(psc->cmd_ctxt), BUS_DMASYNC_PREWRITE); 2331 2332 ring->queued++; 2333 qwx_mhi_ring_doorbell(sc, MHI_CRDB_LOWER, ring->wp); 2334 return 0; 2335 } 2336 2337 int 2338 qwx_mhi_send_cmd(struct qwx_pci_softc *psc, uint32_t cmd, uint32_t chan) 2339 { 2340 struct qwx_softc *sc = &psc->sc_sc; 2341 struct qwx_pci_cmd_ring *ring = &psc->cmd_ring; 2342 struct qwx_mhi_ring_element *e; 2343 2344 if (ring->queued >= ring->num_elements) { 2345 printf("%s: command ring overflow\n", sc->sc_dev.dv_xname); 2346 return 1; 2347 } 2348 2349 e = qwx_pci_cmd_ring_get_elem(ring, ring->wp); 2350 if (e == NULL) 2351 return 1; 2352 2353 e->ptr = 0ULL; 2354 e->dword[0] = 0; 2355 e->dword[1] = htole32( 2356 ((chan << MHI_TRE_CMD_CHID_SHFT) & MHI_TRE_CMD_CHID_MASK) | 2357 ((cmd << MHI_TRE_CMD_CMDID_SHFT) & MHI_TRE_CMD_CMDID_MASK)); 2358 2359 return qwx_mhi_cmd_ring_submit(psc, ring); 2360 } 2361 2362 void * 2363 qwx_pci_xfer_ring_get_elem(struct qwx_pci_xfer_ring 
*ring, uint64_t wp) 2364 { 2365 uint64_t base = QWX_DMA_DVA(ring->dmamem), offset; 2366 void *addr = QWX_DMA_KVA(ring->dmamem); 2367 2368 if (wp < base) 2369 return NULL; 2370 2371 offset = wp - base; 2372 if (offset >= ring->size) 2373 return NULL; 2374 2375 return addr + offset; 2376 } 2377 2378 struct qwx_xfer_data * 2379 qwx_pci_xfer_ring_get_data(struct qwx_pci_xfer_ring *ring, uint64_t wp) 2380 { 2381 uint64_t base = QWX_DMA_DVA(ring->dmamem), offset; 2382 2383 if (wp < base) 2384 return NULL; 2385 2386 offset = wp - base; 2387 if (offset >= ring->size) 2388 return NULL; 2389 2390 return &ring->data[offset / sizeof(ring->data[0])]; 2391 } 2392 2393 int 2394 qwx_mhi_submit_xfer(struct qwx_softc *sc, struct mbuf *m) 2395 { 2396 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; 2397 struct qwx_pci_xfer_ring *ring; 2398 struct qwx_mhi_ring_element *e; 2399 struct qwx_xfer_data *xfer; 2400 uint64_t paddr, base; 2401 int err; 2402 2403 ring = &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_OUTBOUND]; 2404 2405 if (ring->queued >= ring->num_elements) 2406 return 1; 2407 2408 if (m->m_pkthdr.len > QWX_PCI_XFER_MAX_DATA_SIZE) { 2409 /* TODO: chunk xfers */ 2410 printf("%s: xfer too large: %d bytes\n", __func__, m->m_pkthdr.len); 2411 return 1; 2412 2413 } 2414 2415 e = qwx_pci_xfer_ring_get_elem(ring, ring->wp); 2416 if (e == NULL) 2417 return 1; 2418 2419 xfer = qwx_pci_xfer_ring_get_data(ring, ring->wp); 2420 if (xfer == NULL || xfer->m != NULL) 2421 return 1; 2422 2423 err = bus_dmamap_load_mbuf(sc->sc_dmat, xfer->map, m, 2424 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 2425 if (err && err != EFBIG) { 2426 printf("%s: can't map mbuf (error %d)\n", 2427 sc->sc_dev.dv_xname, err); 2428 return err; 2429 } 2430 if (err) { 2431 /* Too many DMA segments, linearize mbuf. 
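		 * m_defrag(9) copies the chain into a single contiguous mbuf
		 * so the payload fits in one DMA segment; then retry the load.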
*/ 2432 if (m_defrag(m, M_DONTWAIT)) 2433 return ENOBUFS; 2434 err = bus_dmamap_load_mbuf(sc->sc_dmat, xfer->map, m, 2435 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 2436 if (err) { 2437 printf("%s: can't map mbuf (error %d)\n", 2438 sc->sc_dev.dv_xname, err); 2439 return err; 2440 } 2441 } 2442 2443 bus_dmamap_sync(sc->sc_dmat, xfer->map, 0, m->m_pkthdr.len, 2444 BUS_DMASYNC_PREWRITE); 2445 2446 xfer->m = m; 2447 paddr = xfer->map->dm_segs[0].ds_addr; 2448 2449 e->ptr = htole64(paddr); 2450 e->dword[0] = htole32((m->m_pkthdr.len << MHI_TRE0_DATA_LEN_SHFT) & 2451 MHI_TRE0_DATA_LEN_MASK); 2452 e->dword[1] = htole32(MHI_TRE1_DATA_IEOT | 2453 MHI_TRE1_DATA_TYPE_TRANSFER << MHI_TRE1_DATA_TYPE_SHIFT); 2454 2455 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(ring->dmamem), 2456 0, QWX_DMA_LEN(ring->dmamem), BUS_DMASYNC_PREWRITE); 2457 2458 base = QWX_DMA_DVA(ring->dmamem); 2459 if (ring->wp + sizeof(struct qwx_mhi_ring_element) >= base + ring->size) 2460 ring->wp = base; 2461 else 2462 ring->wp += sizeof(struct qwx_mhi_ring_element); 2463 ring->queued++; 2464 2465 ring->chan_ctxt->wp = htole64(ring->wp); 2466 2467 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->chan_ctxt), 0, 2468 QWX_DMA_LEN(psc->chan_ctxt), BUS_DMASYNC_PREWRITE); 2469 2470 qwx_mhi_ring_doorbell(sc, ring->db_addr, ring->wp); 2471 return 0; 2472 } 2473 2474 int 2475 qwx_mhi_start_channel(struct qwx_pci_softc *psc, 2476 struct qwx_pci_xfer_ring *ring) 2477 { 2478 struct qwx_softc *sc = &psc->sc_sc; 2479 struct qwx_mhi_chan_ctxt *c; 2480 int ret = 0; 2481 uint32_t chcfg; 2482 uint64_t paddr, len; 2483 2484 DNPRINTF(QWX_D_MHI, "%s: start MHI channel %d in state %d\n", __func__, 2485 ring->mhi_chan_id, ring->mhi_chan_state); 2486 2487 c = ring->chan_ctxt; 2488 2489 chcfg = le32toh(c->chcfg); 2490 chcfg &= ~MHI_CHAN_CTX_CHSTATE_MASK; 2491 chcfg |= MHI_CHAN_CTX_CHSTATE_ENABLED; 2492 c->chcfg = htole32(chcfg); 2493 2494 paddr = QWX_DMA_DVA(ring->dmamem); 2495 ring->rp = ring->wp = paddr; 2496 c->rbase = htole64(paddr); 2497 c->rp = htole64(ring->rp); 2498 c->wp = htole64(ring->wp); 2499 len = sizeof(struct qwx_mhi_ring_element) * ring->num_elements; 2500 c->rlen = htole64(len); 2501 2502 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->chan_ctxt), 0, 2503 QWX_DMA_LEN(psc->chan_ctxt), BUS_DMASYNC_PREWRITE); 2504 2505 ring->cmd_status = MHI_EV_CC_INVALID; 2506 if (qwx_mhi_send_cmd(psc, MHI_CMD_START_CHAN, ring->mhi_chan_id)) 2507 return 1; 2508 2509 while (ring->cmd_status != MHI_EV_CC_SUCCESS) { 2510 ret = tsleep_nsec(&ring->cmd_status, 0, "qwxcmd", 2511 SEC_TO_NSEC(5)); 2512 if (ret) 2513 break; 2514 } 2515 2516 if (ret) { 2517 printf("%s: could not start MHI channel %d in state %d: status 0x%x\n", 2518 sc->sc_dev.dv_xname, ring->mhi_chan_id, 2519 ring->mhi_chan_state, ring->cmd_status); 2520 return 1; 2521 } 2522 2523 if (ring->mhi_chan_direction == MHI_CHAN_TYPE_INBOUND) { 2524 uint64_t wp = QWX_DMA_DVA(ring->dmamem); 2525 int i; 2526 2527 for (i = 0; i < ring->num_elements; i++) { 2528 struct qwx_mhi_ring_element *e; 2529 struct qwx_xfer_data *xfer; 2530 uint64_t paddr; 2531 2532 e = qwx_pci_xfer_ring_get_elem(ring, wp); 2533 xfer = qwx_pci_xfer_ring_get_data(ring, wp); 2534 paddr = xfer->map->dm_segs[0].ds_addr; 2535 2536 e->ptr = htole64(paddr); 2537 e->dword[0] = htole32((QWX_PCI_XFER_MAX_DATA_SIZE << 2538 MHI_TRE0_DATA_LEN_SHFT) & 2539 MHI_TRE0_DATA_LEN_MASK); 2540 e->dword[1] = htole32(MHI_TRE1_DATA_IEOT | 2541 MHI_TRE1_DATA_BEI | 2542 MHI_TRE1_DATA_TYPE_TRANSFER << 2543 MHI_TRE1_DATA_TYPE_SHIFT); 2544 2545 ring->wp = wp; 2546 wp += sizeof(*e); 2547 
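			/*
			 * ring->wp is left pointing at the last element
			 * initialized here; the doorbell below is rung
			 * with this address.
			 */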
} 2548 2549 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(ring->dmamem), 0, 2550 QWX_DMA_LEN(ring->dmamem), BUS_DMASYNC_PREWRITE); 2551 2552 qwx_mhi_ring_doorbell(sc, ring->db_addr, ring->wp); 2553 } 2554 2555 return 0; 2556 } 2557 2558 int 2559 qwx_mhi_start_channels(struct qwx_pci_softc *psc) 2560 { 2561 struct qwx_pci_xfer_ring *ring; 2562 int ret = 0; 2563 2564 qwx_mhi_device_wake(&psc->sc_sc); 2565 2566 ring = &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_OUTBOUND]; 2567 if (qwx_mhi_start_channel(psc, ring)) { 2568 ret = 1; 2569 goto done; 2570 } 2571 2572 ring = &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_INBOUND]; 2573 if (qwx_mhi_start_channel(psc, ring)) 2574 ret = 1; 2575 done: 2576 qwx_mhi_device_zzz(&psc->sc_sc); 2577 return ret; 2578 } 2579 2580 int 2581 qwx_mhi_start(struct qwx_pci_softc *psc) 2582 { 2583 struct qwx_softc *sc = &psc->sc_sc; 2584 uint32_t off; 2585 uint32_t ee, state; 2586 int ret; 2587 2588 qwx_mhi_init_dev_ctxt(psc); 2589 2590 psc->bhi_off = qwx_pci_read(sc, MHI_BHI_OFFSET); 2591 DNPRINTF(QWX_D_MHI, "%s: BHI offset 0x%x\n", __func__, psc->bhi_off); 2592 2593 psc->bhie_off = qwx_pci_read(sc, MHI_BHIE_OFFSET); 2594 DNPRINTF(QWX_D_MHI, "%s: BHIE offset 0x%x\n", __func__, psc->bhie_off); 2595 2596 /* Clean BHIE RX registers */ 2597 for (off = MHI_BHIE_RXVECADDR_LOW_OFFS; 2598 off < (MHI_BHIE_RXVECSTATUS_OFFS - 4); 2599 off += 4) 2600 qwx_pci_write(sc, psc->bhie_off + off, 0x0); 2601 2602 qwx_rddm_prepare(psc); 2603 2604 /* Program BHI INTVEC */ 2605 qwx_pci_write(sc, psc->bhi_off + MHI_BHI_INTVEC, 0x00); 2606 2607 /* 2608 * Get BHI execution environment and confirm that it is valid 2609 * for power on. 2610 */ 2611 ee = qwx_pci_read(sc, psc->bhi_off + MHI_BHI_EXECENV); 2612 if (!MHI_POWER_UP_CAPABLE(ee)) { 2613 printf("%s: invalid EE for power on: 0x%x\n", 2614 sc->sc_dev.dv_xname, ee); 2615 return 1; 2616 } 2617 2618 /* 2619 * Get MHI state of the device and reset it if it is in system 2620 * error. 2621 */ 2622 state = qwx_pci_read(sc, MHI_STATUS); 2623 DNPRINTF(QWX_D_MHI, "%s: MHI power on with EE: 0x%x, status: 0x%x\n", 2624 sc->sc_dev.dv_xname, ee, state); 2625 state = (state & MHI_STATUS_MHISTATE_MASK) >> MHI_STATUS_MHISTATE_SHFT; 2626 if (state == MHI_STATE_SYS_ERR) { 2627 if (qwx_mhi_reset_device(sc, 0)) 2628 return 1; 2629 state = qwx_pci_read(sc, MHI_STATUS); 2630 DNPRINTF(QWX_D_MHI, "%s: MHI state after reset: 0x%x\n", 2631 sc->sc_dev.dv_xname, state); 2632 state = (state & MHI_STATUS_MHISTATE_MASK) >> 2633 MHI_STATUS_MHISTATE_SHFT; 2634 if (state == MHI_STATE_SYS_ERR) { 2635 printf("%s: MHI stuck in system error state\n", 2636 sc->sc_dev.dv_xname); 2637 return 1; 2638 } 2639 } 2640 2641 psc->bhi_ee = ee; 2642 psc->mhi_state = state; 2643 2644 #if notyet 2645 /* Enable IRQs */ 2646 // XXX todo? 2647 #endif 2648 2649 /* Transition to primary runtime. */ 2650 if (MHI_IN_PBL(ee)) { 2651 ret = qwx_mhi_fw_load_handler(psc); 2652 if (ret) 2653 return ret; 2654 } else { 2655 /* XXX Handle partially initialized device...?!? 
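		 * For now, if the device is not in PBL, assume firmware is
		 * already running and only verify that the execution
		 * environment reports mission mode.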
*/ 2656 ee = qwx_pci_read(sc, psc->bhi_off + MHI_BHI_EXECENV); 2657 if (!MHI_IN_MISSION_MODE(ee)) { 2658 printf("%s: failed to power up MHI, ee=0x%x\n", 2659 sc->sc_dev.dv_xname, ee); 2660 return EIO; 2661 } 2662 } 2663 2664 return 0; 2665 } 2666 2667 void 2668 qwx_mhi_stop(struct qwx_softc *sc) 2669 { 2670 qwx_mhi_reset_device(sc, 1); 2671 } 2672 2673 int 2674 qwx_mhi_reset_device(struct qwx_softc *sc, int force) 2675 { 2676 struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; 2677 uint32_t reg; 2678 int ret = 0; 2679 2680 reg = qwx_pcic_read32(sc, MHI_STATUS); 2681 2682 DNPRINTF(QWX_D_MHI, "%s: MHISTATUS 0x%x\n", sc->sc_dev.dv_xname, reg); 2683 /* 2684 * Observed on QCA6390 that after SOC_GLOBAL_RESET, MHISTATUS 2685 * has SYSERR bit set and thus need to set MHICTRL_RESET 2686 * to clear SYSERR. 2687 */ 2688 if (force || (reg & MHI_STATUS_SYSERR_MASK)) { 2689 /* Trigger MHI Reset in device. */ 2690 qwx_pcic_write32(sc, MHI_CTRL, MHI_CTRL_RESET_MASK); 2691 2692 /* Wait for the reset bit to be cleared by the device. */ 2693 ret = qwx_mhi_await_device_reset(sc); 2694 if (ret) 2695 return ret; 2696 2697 if (psc->bhi_off == 0) 2698 psc->bhi_off = qwx_pci_read(sc, MHI_BHI_OFFSET); 2699 2700 /* Device clear BHI INTVEC so re-program it. */ 2701 qwx_pci_write(sc, psc->bhi_off + MHI_BHI_INTVEC, 0x00); 2702 } 2703 2704 return 0; 2705 } 2706 2707 static inline void 2708 qwx_mhi_reset_txvecdb(struct qwx_softc *sc) 2709 { 2710 qwx_pcic_write32(sc, PCIE_TXVECDB, 0); 2711 } 2712 2713 static inline void 2714 qwx_mhi_reset_txvecstatus(struct qwx_softc *sc) 2715 { 2716 qwx_pcic_write32(sc, PCIE_TXVECSTATUS, 0); 2717 } 2718 2719 static inline void 2720 qwx_mhi_reset_rxvecdb(struct qwx_softc *sc) 2721 { 2722 qwx_pcic_write32(sc, PCIE_RXVECDB, 0); 2723 } 2724 2725 static inline void 2726 qwx_mhi_reset_rxvecstatus(struct qwx_softc *sc) 2727 { 2728 qwx_pcic_write32(sc, PCIE_RXVECSTATUS, 0); 2729 } 2730 2731 void 2732 qwx_mhi_clear_vector(struct qwx_softc *sc) 2733 { 2734 qwx_mhi_reset_txvecdb(sc); 2735 qwx_mhi_reset_txvecstatus(sc); 2736 qwx_mhi_reset_rxvecdb(sc); 2737 qwx_mhi_reset_rxvecstatus(sc); 2738 } 2739 2740 int 2741 qwx_mhi_fw_load_handler(struct qwx_pci_softc *psc) 2742 { 2743 struct qwx_softc *sc = &psc->sc_sc; 2744 int ret; 2745 char amss_path[PATH_MAX]; 2746 u_char *data; 2747 size_t len; 2748 2749 ret = snprintf(amss_path, sizeof(amss_path), "%s/%s/%s", 2750 ATH11K_FW_DIR, sc->hw_params.fw.dir, ATH11K_AMSS_FILE); 2751 if (ret < 0 || ret >= sizeof(amss_path)) 2752 return ENOSPC; 2753 2754 ret = loadfirmware(amss_path, &data, &len); 2755 if (ret) { 2756 printf("%s: could not read %s (error %d)\n", 2757 sc->sc_dev.dv_xname, amss_path, ret); 2758 return ret; 2759 } 2760 2761 if (len < MHI_DMA_VEC_CHUNK_SIZE) { 2762 printf("%s: %s is too short, have only %zu bytes\n", 2763 sc->sc_dev.dv_xname, amss_path, len); 2764 free(data, M_DEVBUF, len); 2765 return EINVAL; 2766 } 2767 2768 /* Second-stage boot loader sits in the first 512 KB of image. 
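	 * Load that chunk via BHI first; once the device has entered the
	 * SBL execution environment, the full image is loaded via BHIE
	 * below.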
*/ 2769 ret = qwx_mhi_fw_load_bhi(psc, data, MHI_DMA_VEC_CHUNK_SIZE); 2770 if (ret != 0) { 2771 printf("%s: could not load firmware %s\n", 2772 sc->sc_dev.dv_xname, amss_path); 2773 free(data, M_DEVBUF, len); 2774 return ret; 2775 } 2776 2777 while (psc->bhi_ee < MHI_EE_SBL) { 2778 ret = tsleep_nsec(&psc->bhi_ee, 0, "qwxsbl", 2779 SEC_TO_NSEC(2)); 2780 if (ret) 2781 break; 2782 } 2783 if (ret != 0) { 2784 printf("%s: device failed to start secondary bootloader\n", 2785 sc->sc_dev.dv_xname); 2786 free(data, M_DEVBUF, len); 2787 return ret; 2788 } 2789 2790 /* Now load the full image. */ 2791 ret = qwx_mhi_fw_load_bhie(psc, data, len); 2792 if (ret != 0) { 2793 printf("%s: could not load firmware %s\n", 2794 sc->sc_dev.dv_xname, amss_path); 2795 } 2796 2797 while (psc->bhi_ee < MHI_EE_AMSS) { 2798 ret = tsleep_nsec(&psc->bhi_ee, 0, "qwxamss", 2799 SEC_TO_NSEC(5)); 2800 if (ret) 2801 break; 2802 } 2803 if (ret != 0) { 2804 printf("%s: device failed to enter AMSS EE\n", 2805 sc->sc_dev.dv_xname); 2806 free(data, M_DEVBUF, len); 2807 return ret; 2808 } 2809 2810 free(data, M_DEVBUF, len); 2811 return ret; 2812 } 2813 2814 int 2815 qwx_mhi_await_device_reset(struct qwx_softc *sc) 2816 { 2817 const uint32_t msecs = 24, retries = 2; 2818 uint32_t reg; 2819 int timeout; 2820 2821 /* Poll for CTRL RESET to clear. */ 2822 timeout = retries; 2823 while (timeout > 0) { 2824 reg = qwx_pci_read(sc, MHI_CTRL); 2825 DNPRINTF(QWX_D_MHI, "%s: MHI_CTRL is 0x%x\n", __func__, reg); 2826 if ((reg & MHI_CTRL_RESET_MASK) == 0) 2827 break; 2828 DELAY((msecs / retries) * 1000); 2829 timeout--; 2830 } 2831 if (timeout == 0) { 2832 DNPRINTF(QWX_D_MHI, "%s: MHI reset failed\n", __func__); 2833 return ETIMEDOUT; 2834 } 2835 2836 return 0; 2837 } 2838 2839 int 2840 qwx_mhi_await_device_ready(struct qwx_softc *sc) 2841 { 2842 uint32_t reg; 2843 int timeout; 2844 const uint32_t msecs = 2000, retries = 4; 2845 2846 2847 /* Poll for READY to be set. */ 2848 timeout = retries; 2849 while (timeout > 0) { 2850 reg = qwx_pci_read(sc, MHI_STATUS); 2851 DNPRINTF(QWX_D_MHI, "%s: MHI_STATUS is 0x%x\n", __func__, reg); 2852 if (reg & MHI_STATUS_READY_MASK) { 2853 reg &= ~MHI_STATUS_READY_MASK; 2854 qwx_pci_write(sc, MHI_STATUS, reg); 2855 break; 2856 } 2857 DELAY((msecs / retries) * 1000); 2858 timeout--; 2859 } 2860 if (timeout == 0) { 2861 printf("%s: MHI not ready\n", sc->sc_dev.dv_xname); 2862 return ETIMEDOUT; 2863 } 2864 2865 return 0; 2866 } 2867 2868 void 2869 qwx_mhi_ready_state_transition(struct qwx_pci_softc *psc) 2870 { 2871 struct qwx_softc *sc = &psc->sc_sc; 2872 int ret, i; 2873 2874 ret = qwx_mhi_await_device_reset(sc); 2875 if (ret) 2876 return; 2877 2878 ret = qwx_mhi_await_device_ready(sc); 2879 if (ret) 2880 return; 2881 2882 /* Set up memory-mapped IO for channels, events, etc. */ 2883 qwx_mhi_init_mmio(psc); 2884 2885 /* Notify event rings. */ 2886 for (i = 0; i < nitems(psc->event_rings); i++) { 2887 struct qwx_pci_event_ring *ring = &psc->event_rings[i]; 2888 qwx_mhi_ring_doorbell(sc, ring->db_addr, ring->wp); 2889 } 2890 2891 /* 2892 * Set the device into M0 state. The device will transition 2893 * into M0 and the execution environment will switch to SBL. 
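	 * The resulting state-change and EE-change events arrive on the
	 * control event ring and are processed by qwx_mhi_state_change().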
2894 */ 2895 qwx_mhi_set_state(sc, MHI_STATE_M0); 2896 } 2897 2898 void 2899 qwx_mhi_ee_amss_state_transition(struct qwx_pci_softc *psc) 2900 { 2901 qwx_mhi_start_channels(psc); 2902 } 2903 2904 void 2905 qwx_mhi_mission_mode_state_transition(struct qwx_pci_softc *psc) 2906 { 2907 struct qwx_softc *sc = &psc->sc_sc; 2908 int i; 2909 2910 qwx_mhi_device_wake(sc); 2911 2912 /* Notify event rings. */ 2913 for (i = 0; i < nitems(psc->event_rings); i++) { 2914 struct qwx_pci_event_ring *ring = &psc->event_rings[i]; 2915 qwx_mhi_ring_doorbell(sc, ring->db_addr, ring->wp); 2916 } 2917 2918 /* TODO: Notify transfer/command rings? */ 2919 2920 qwx_mhi_device_zzz(sc); 2921 } 2922 2923 void 2924 qwx_mhi_set_state(struct qwx_softc *sc, uint32_t state) 2925 { 2926 uint32_t reg; 2927 2928 reg = qwx_pci_read(sc, MHI_CTRL); 2929 2930 if (state != MHI_STATE_RESET) { 2931 reg &= ~MHI_CTRL_MHISTATE_MASK; 2932 reg |= (state << MHI_CTRL_MHISTATE_SHFT) & MHI_CTRL_MHISTATE_MASK; 2933 } else 2934 reg |= MHI_CTRL_RESET_MASK; 2935 2936 qwx_pci_write(sc, MHI_CTRL, reg); 2937 } 2938 2939 void 2940 qwx_mhi_init_mmio(struct qwx_pci_softc *psc) 2941 { 2942 struct qwx_softc *sc = &psc->sc_sc; 2943 uint64_t paddr; 2944 uint32_t reg; 2945 int i; 2946 2947 reg = qwx_pci_read(sc, MHI_CHDBOFF); 2948 2949 /* Set device wake doorbell address. */ 2950 psc->wake_db = reg + 8 * MHI_DEV_WAKE_DB; 2951 2952 /* Set doorbell address for each transfer ring. */ 2953 for (i = 0; i < nitems(psc->xfer_rings); i++) { 2954 struct qwx_pci_xfer_ring *ring = &psc->xfer_rings[i]; 2955 ring->db_addr = reg + (8 * ring->mhi_chan_id); 2956 } 2957 2958 reg = qwx_pci_read(sc, MHI_ERDBOFF); 2959 /* Set doorbell address for each event ring. */ 2960 for (i = 0; i < nitems(psc->event_rings); i++) { 2961 struct qwx_pci_event_ring *ring = &psc->event_rings[i]; 2962 ring->db_addr = reg + (8 * i); 2963 } 2964 2965 paddr = QWX_DMA_DVA(psc->chan_ctxt); 2966 qwx_pci_write(sc, MHI_CCABAP_HIGHER, paddr >> 32); 2967 qwx_pci_write(sc, MHI_CCABAP_LOWER, paddr & 0xffffffff); 2968 2969 paddr = QWX_DMA_DVA(psc->event_ctxt); 2970 qwx_pci_write(sc, MHI_ECABAP_HIGHER, paddr >> 32); 2971 qwx_pci_write(sc, MHI_ECABAP_LOWER, paddr & 0xffffffff); 2972 2973 paddr = QWX_DMA_DVA(psc->cmd_ctxt); 2974 qwx_pci_write(sc, MHI_CRCBAP_HIGHER, paddr >> 32); 2975 qwx_pci_write(sc, MHI_CRCBAP_LOWER, paddr & 0xffffffff); 2976 2977 /* Not (yet?) using fixed memory space from a device-tree. */ 2978 qwx_pci_write(sc, MHI_CTRLBASE_HIGHER, 0); 2979 qwx_pci_write(sc, MHI_CTRLBASE_LOWER, 0); 2980 qwx_pci_write(sc, MHI_DATABASE_HIGHER, 0); 2981 qwx_pci_write(sc, MHI_DATABASE_LOWER, 0); 2982 qwx_pci_write(sc, MHI_CTRLLIMIT_HIGHER, 0x0); 2983 qwx_pci_write(sc, MHI_CTRLLIMIT_LOWER, 0xffffffff); 2984 qwx_pci_write(sc, MHI_DATALIMIT_HIGHER, 0x0); 2985 qwx_pci_write(sc, MHI_DATALIMIT_LOWER, 0xffffffff); 2986 2987 reg = qwx_pci_read(sc, MHI_CFG); 2988 reg &= ~(MHI_CFG_NER_MASK | MHI_CFG_NHWER_MASK); 2989 reg |= QWX_NUM_EVENT_CTX << MHI_CFG_NER_SHFT; 2990 qwx_pci_write(sc, MHI_CFG, reg); 2991 } 2992 2993 int 2994 qwx_mhi_fw_load_bhi(struct qwx_pci_softc *psc, uint8_t *data, size_t len) 2995 { 2996 struct qwx_softc *sc = &psc->sc_sc; 2997 struct qwx_dmamem *data_adm; 2998 uint32_t seq, reg, status = MHI_BHI_STATUS_RESET; 2999 uint64_t paddr; 3000 int ret; 3001 3002 data_adm = qwx_dmamem_alloc(sc->sc_dmat, len, 0); 3003 if (data_adm == NULL) { 3004 printf("%s: could not allocate BHI DMA data buffer\n", 3005 sc->sc_dev.dv_xname); 3006 return 1; 3007 } 3008 3009 /* Copy firmware image to DMA memory. 
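	 * BHI will DMA the image from this buffer once the IMGTXDB
	 * doorbell is rung below.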
*/ 3010 memcpy(QWX_DMA_KVA(data_adm), data, len); 3011 3012 qwx_pci_write(sc, psc->bhi_off + MHI_BHI_STATUS, 0); 3013 3014 /* Set data physical address and length. */ 3015 paddr = QWX_DMA_DVA(data_adm); 3016 qwx_pci_write(sc, psc->bhi_off + MHI_BHI_IMGADDR_HIGH, paddr >> 32); 3017 qwx_pci_write(sc, psc->bhi_off + MHI_BHI_IMGADDR_LOW, 3018 paddr & 0xffffffff); 3019 qwx_pci_write(sc, psc->bhi_off + MHI_BHI_IMGSIZE, len); 3020 3021 /* Set a random transaction sequence number. */ 3022 do { 3023 seq = arc4random_uniform(MHI_BHI_TXDB_SEQNUM_BMSK); 3024 } while (seq == 0); 3025 qwx_pci_write(sc, psc->bhi_off + MHI_BHI_IMGTXDB, seq); 3026 3027 /* Wait for completion. */ 3028 ret = 0; 3029 while (status != MHI_BHI_STATUS_SUCCESS) { 3030 ret = tsleep_nsec(&psc->bhi_off, 0, "qwxbhi", 3031 SEC_TO_NSEC(1)); 3032 if (ret) 3033 break; 3034 3035 reg = qwx_pci_read(sc, psc->bhi_off + MHI_BHI_STATUS); 3036 status = (reg & MHI_BHI_STATUS_MASK) >> MHI_BHI_STATUS_SHFT; 3037 DNPRINTF(QWX_D_MHI, "%s: BHI status is 0x%x\n", 3038 __func__, status); 3039 } 3040 3041 qwx_dmamem_free(sc->sc_dmat, data_adm); 3042 3043 if (ret) { 3044 printf("%s: BHI load timeout\n", sc->sc_dev.dv_xname); 3045 return ret; 3046 } 3047 return 0; 3048 } 3049 3050 int 3051 qwx_mhi_fw_load_bhie(struct qwx_pci_softc *psc, uint8_t *data, size_t len) 3052 { 3053 struct qwx_softc *sc = &psc->sc_sc; 3054 struct qwx_dma_vec_entry *vec; 3055 uint32_t seq, reg, state = MHI_BHIE_TXVECSTATUS_STATUS_RESET; 3056 uint64_t paddr; 3057 const size_t chunk_size = MHI_DMA_VEC_CHUNK_SIZE; 3058 size_t nseg, remain, vec_size; 3059 int i, ret; 3060 3061 nseg = howmany(len, chunk_size); 3062 if (nseg == 0) { 3063 printf("%s: BHIE data too short, have only %zu bytes\n", 3064 sc->sc_dev.dv_xname, len); 3065 return 1; 3066 } 3067 3068 if (psc->amss_data == NULL || QWX_DMA_LEN(psc->amss_data) < len) { 3069 if (psc->amss_data) 3070 qwx_dmamem_free(sc->sc_dmat, psc->amss_data); 3071 psc->amss_data = qwx_dmamem_alloc(sc->sc_dmat, len, 0); 3072 if (psc->amss_data == NULL) { 3073 printf("%s: could not allocate BHIE DMA data buffer\n", 3074 sc->sc_dev.dv_xname); 3075 return 1; 3076 } 3077 } 3078 3079 vec_size = nseg * sizeof(*vec); 3080 if (psc->amss_vec == NULL || QWX_DMA_LEN(psc->amss_vec) < vec_size) { 3081 if (psc->amss_vec) 3082 qwx_dmamem_free(sc->sc_dmat, psc->amss_vec); 3083 psc->amss_vec = qwx_dmamem_alloc(sc->sc_dmat, vec_size, 0); 3084 if (psc->amss_vec == NULL) { 3085 printf("%s: could not allocate BHIE DMA vec buffer\n", 3086 sc->sc_dev.dv_xname); 3087 qwx_dmamem_free(sc->sc_dmat, psc->amss_data); 3088 psc->amss_data = NULL; 3089 return 1; 3090 } 3091 } 3092 3093 /* Copy firmware image to DMA memory. */ 3094 memcpy(QWX_DMA_KVA(psc->amss_data), data, len); 3095 3096 /* Create vector which controls chunk-wise DMA copy in hardware. */ 3097 paddr = QWX_DMA_DVA(psc->amss_data); 3098 vec = QWX_DMA_KVA(psc->amss_vec); 3099 remain = len; 3100 for (i = 0; i < nseg; i++) { 3101 vec[i].paddr = paddr; 3102 if (remain >= chunk_size) { 3103 vec[i].size = chunk_size; 3104 remain -= chunk_size; 3105 paddr += chunk_size; 3106 } else 3107 vec[i].size = remain; 3108 } 3109 3110 /* Set vector physical address and length. */ 3111 paddr = QWX_DMA_DVA(psc->amss_vec); 3112 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECADDR_HIGH_OFFS, 3113 paddr >> 32); 3114 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECADDR_LOW_OFFS, 3115 paddr & 0xffffffff); 3116 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECSIZE_OFFS, vec_size); 3117 3118 /* Set a random transaction sequence number. 
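	 * The non-zero sequence number is programmed into the TXVECDB
	 * doorbell register below to start the vectored image transfer.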
*/ 3119 do { 3120 seq = arc4random_uniform(MHI_BHIE_TXVECSTATUS_SEQNUM_BMSK); 3121 } while (seq == 0); 3122 reg = qwx_pci_read(sc, psc->bhie_off + MHI_BHIE_TXVECDB_OFFS); 3123 reg &= ~MHI_BHIE_TXVECDB_SEQNUM_BMSK; 3124 reg |= seq << MHI_BHIE_TXVECDB_SEQNUM_SHFT; 3125 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECDB_OFFS, reg); 3126 3127 /* Wait for completion. */ 3128 ret = 0; 3129 while (state != MHI_BHIE_TXVECSTATUS_STATUS_XFER_COMPL) { 3130 ret = tsleep_nsec(&psc->bhie_off, 0, "qwxbhie", 3131 SEC_TO_NSEC(5)); 3132 if (ret) 3133 break; 3134 reg = qwx_pci_read(sc, 3135 psc->bhie_off + MHI_BHIE_TXVECSTATUS_OFFS); 3136 state = (reg & MHI_BHIE_TXVECSTATUS_STATUS_BMSK) >> 3137 MHI_BHIE_TXVECSTATUS_STATUS_SHFT; 3138 DNPRINTF(QWX_D_MHI, "%s: txvec state is 0x%x\n", __func__, 3139 state); 3140 } 3141 3142 if (ret) { 3143 printf("%s: BHIE load timeout\n", sc->sc_dev.dv_xname); 3144 return ret; 3145 } 3146 return 0; 3147 } 3148 3149 void 3150 qwx_rddm_prepare(struct qwx_pci_softc *psc) 3151 { 3152 struct qwx_softc *sc = &psc->sc_sc; 3153 struct qwx_dma_vec_entry *vec; 3154 struct qwx_dmamem *data_adm, *vec_adm; 3155 uint32_t seq, reg; 3156 uint64_t paddr; 3157 const size_t len = QWX_RDDM_DUMP_SIZE; 3158 const size_t chunk_size = MHI_DMA_VEC_CHUNK_SIZE; 3159 size_t nseg, remain, vec_size; 3160 int i; 3161 3162 nseg = howmany(len, chunk_size); 3163 if (nseg == 0) { 3164 printf("%s: RDDM data too short, have only %zu bytes\n", 3165 sc->sc_dev.dv_xname, len); 3166 return; 3167 } 3168 3169 data_adm = qwx_dmamem_alloc(sc->sc_dmat, len, 0); 3170 if (data_adm == NULL) { 3171 printf("%s: could not allocate BHIE DMA data buffer\n", 3172 sc->sc_dev.dv_xname); 3173 return; 3174 } 3175 3176 vec_size = nseg * sizeof(*vec); 3177 vec_adm = qwx_dmamem_alloc(sc->sc_dmat, vec_size, 0); 3178 if (vec_adm == NULL) { 3179 printf("%s: could not allocate BHIE DMA vector buffer\n", 3180 sc->sc_dev.dv_xname); 3181 qwx_dmamem_free(sc->sc_dmat, data_adm); 3182 return; 3183 } 3184 3185 /* Create vector which controls chunk-wise DMA copy from hardware. */ 3186 paddr = QWX_DMA_DVA(data_adm); 3187 vec = QWX_DMA_KVA(vec_adm); 3188 remain = len; 3189 for (i = 0; i < nseg; i++) { 3190 vec[i].paddr = paddr; 3191 if (remain >= chunk_size) { 3192 vec[i].size = chunk_size; 3193 remain -= chunk_size; 3194 paddr += chunk_size; 3195 } else 3196 vec[i].size = remain; 3197 } 3198 3199 /* Set vector physical address and length. */ 3200 paddr = QWX_DMA_DVA(vec_adm); 3201 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECADDR_HIGH_OFFS, 3202 paddr >> 32); 3203 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECADDR_LOW_OFFS, 3204 paddr & 0xffffffff); 3205 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECSIZE_OFFS, vec_size); 3206 3207 /* Set a random transaction sequence number. 
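	 * The non-zero sequence number is programmed into the RXVECDB
	 * doorbell register; the device is expected to fill the prepared
	 * buffer with a RAM dump when it enters the RDDM execution
	 * environment (see qwx_rddm_task()).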
*/ 3208 do { 3209 seq = arc4random_uniform(MHI_BHIE_RXVECSTATUS_SEQNUM_BMSK); 3210 } while (seq == 0); 3211 3212 reg = qwx_pci_read(sc, psc->bhie_off + MHI_BHIE_RXVECDB_OFFS); 3213 reg &= ~MHI_BHIE_RXVECDB_SEQNUM_BMSK; 3214 reg |= seq << MHI_BHIE_RXVECDB_SEQNUM_SHFT; 3215 qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECDB_OFFS, reg); 3216 3217 psc->rddm_data = data_adm; 3218 psc->rddm_vec = vec_adm; 3219 } 3220 3221 void 3222 qwx_rddm_task(void *arg) 3223 { 3224 struct qwx_pci_softc *psc = arg; 3225 struct qwx_softc *sc = &psc->sc_sc; 3226 uint32_t reg, state = MHI_BHIE_RXVECSTATUS_STATUS_RESET; 3227 const size_t len = QWX_RDDM_DUMP_SIZE; 3228 int i, timeout; 3229 const uint32_t msecs = 100, retries = 20; 3230 uint8_t *rddm; 3231 struct nameidata nd; 3232 struct vnode *vp = NULL; 3233 struct iovec iov[3]; 3234 struct uio uio; 3235 char path[PATH_MAX]; 3236 int error = 0; 3237 3238 if (psc->rddm_data == NULL) { 3239 DPRINTF("%s: RDDM not prepared\n", __func__); 3240 return; 3241 } 3242 3243 /* Poll for completion */ 3244 timeout = retries; 3245 while (timeout > 0 && state != MHI_BHIE_RXVECSTATUS_STATUS_XFER_COMPL) { 3246 reg = qwx_pci_read(sc, 3247 psc->bhie_off + MHI_BHIE_RXVECSTATUS_OFFS); 3248 state = (reg & MHI_BHIE_RXVECSTATUS_STATUS_BMSK) >> 3249 MHI_BHIE_RXVECSTATUS_STATUS_SHFT; 3250 DPRINTF("%s: txvec state is 0x%x\n", __func__, state); 3251 DELAY((msecs / retries) * 1000); 3252 timeout--; 3253 } 3254 3255 if (timeout == 0) { 3256 DPRINTF("%s: RDDM dump failed\n", sc->sc_dev.dv_xname); 3257 return; 3258 } 3259 3260 rddm = QWX_DMA_KVA(psc->rddm_data); 3261 DPRINTF("%s: RDDM snippet:\n", __func__); 3262 for (i = 0; i < MIN(64, len); i++) { 3263 DPRINTF("%s %.2x", i % 16 == 0 ? "\n" : "", rddm[i]); 3264 } 3265 DPRINTF("\n"); 3266 3267 DPRINTF("%s: sleeping for 30 seconds to allow userland to boot\n", __func__); 3268 tsleep_nsec(&psc->rddm_data, 0, "qwxrddm", SEC_TO_NSEC(30)); 3269 3270 snprintf(path, sizeof(path), "/root/%s-rddm.bin", sc->sc_dev.dv_xname); 3271 DPRINTF("%s: saving RDDM to %s\n", __func__, path); 3272 NDINIT(&nd, 0, 0, UIO_SYSSPACE, path, curproc); 3273 nd.ni_pledge = PLEDGE_CPATH | PLEDGE_WPATH; 3274 nd.ni_unveil = UNVEIL_CREATE | UNVEIL_WRITE; 3275 error = vn_open(&nd, FWRITE | O_CREAT | O_NOFOLLOW | O_TRUNC, 3276 S_IRUSR | S_IWUSR); 3277 if (error) { 3278 DPRINTF("%s: vn_open: error %d\n", __func__, error); 3279 goto done; 3280 } 3281 vp = nd.ni_vp; 3282 VOP_UNLOCK(vp); 3283 3284 iov[0].iov_base = (void *)rddm; 3285 iov[0].iov_len = len; 3286 iov[1].iov_len = 0; 3287 uio.uio_iov = &iov[0]; 3288 uio.uio_offset = 0; 3289 uio.uio_segflg = UIO_SYSSPACE; 3290 uio.uio_rw = UIO_WRITE; 3291 uio.uio_resid = len; 3292 uio.uio_iovcnt = 1; 3293 uio.uio_procp = curproc; 3294 error = vget(vp, LK_EXCLUSIVE | LK_RETRY); 3295 if (error) { 3296 DPRINTF("%s: vget: error %d\n", __func__, error); 3297 goto done; 3298 } 3299 error = VOP_WRITE(vp, &uio, IO_UNIT|IO_APPEND, curproc->p_ucred); 3300 vput(vp); 3301 if (error) 3302 DPRINTF("%s: VOP_WRITE: error %d\n", __func__, error); 3303 #if 0 3304 error = vn_close(vp, FWRITE, curproc->p_ucred, curproc); 3305 if (error) 3306 DPRINTF("%s: vn_close: error %d\n", __func__, error); 3307 #endif 3308 done: 3309 qwx_dmamem_free(sc->sc_dmat, psc->rddm_data); 3310 qwx_dmamem_free(sc->sc_dmat, psc->rddm_vec); 3311 psc->rddm_data = NULL; 3312 psc->rddm_vec = NULL; 3313 DPRINTF("%s: done, error %d\n", __func__, error); 3314 } 3315 3316 void * 3317 qwx_pci_event_ring_get_elem(struct qwx_pci_event_ring *ring, uint64_t rp) 3318 { 3319 uint64_t base = 
QWX_DMA_DVA(ring->dmamem), offset; 3320 void *addr = QWX_DMA_KVA(ring->dmamem); 3321 3322 if (rp < base) 3323 return NULL; 3324 3325 offset = rp - base; 3326 if (offset >= ring->size) 3327 return NULL; 3328 3329 return addr + offset; 3330 } 3331 3332 void 3333 qwx_mhi_state_change(void *arg) 3334 { 3335 struct qwx_pci_softc *psc = arg; 3336 struct qwx_softc *sc = &psc->sc_sc; 3337 struct qwx_mhi_newstate *ns = &psc->mhi_newstate; 3338 3339 while (ns->tail != ns->cur) { 3340 int mhi_state = ns->queue[ns->tail].mhi_state; 3341 int ee = ns->queue[ns->tail].ee; 3342 uint32_t old_ee = psc->bhi_ee; 3343 uint32_t old_mhi_state = psc->mhi_state; 3344 3345 KASSERT(ns->queued > 0); 3346 3347 if (ee != -1 && psc->bhi_ee != ee) { 3348 switch (ee) { 3349 case MHI_EE_PBL: 3350 DNPRINTF(QWX_D_MHI, "%s: new EE PBL\n", 3351 sc->sc_dev.dv_xname); 3352 psc->bhi_ee = ee; 3353 break; 3354 case MHI_EE_SBL: 3355 psc->bhi_ee = ee; 3356 DNPRINTF(QWX_D_MHI, "%s: new EE SBL\n", 3357 sc->sc_dev.dv_xname); 3358 break; 3359 case MHI_EE_AMSS: 3360 DNPRINTF(QWX_D_MHI, "%s: new EE AMSS\n", 3361 sc->sc_dev.dv_xname); 3362 psc->bhi_ee = ee; 3363 qwx_mhi_ee_amss_state_transition(psc); 3364 /* Wake thread loading the full AMSS image. */ 3365 wakeup(&psc->bhie_off); 3366 break; 3367 case MHI_EE_WFW: 3368 DNPRINTF(QWX_D_MHI, "%s: new EE WFW\n", 3369 sc->sc_dev.dv_xname); 3370 psc->bhi_ee = ee; 3371 break; 3372 default: 3373 printf("%s: unhandled EE change to %x\n", 3374 sc->sc_dev.dv_xname, ee); 3375 break; 3376 } 3377 } 3378 3379 if (mhi_state != -1 && psc->mhi_state != mhi_state) { 3380 switch (mhi_state) { 3381 case -1: 3382 break; 3383 case MHI_STATE_RESET: 3384 DNPRINTF(QWX_D_MHI, "%s: new MHI state RESET\n", 3385 sc->sc_dev.dv_xname); 3386 psc->mhi_state = mhi_state; 3387 break; 3388 case MHI_STATE_READY: 3389 DNPRINTF(QWX_D_MHI, "%s: new MHI state READY\n", 3390 sc->sc_dev.dv_xname); 3391 psc->mhi_state = mhi_state; 3392 qwx_mhi_ready_state_transition(psc); 3393 break; 3394 case MHI_STATE_M0: 3395 DNPRINTF(QWX_D_MHI, "%s: new MHI state M0\n", 3396 sc->sc_dev.dv_xname); 3397 psc->mhi_state = mhi_state; 3398 qwx_mhi_mission_mode_state_transition(psc); 3399 break; 3400 case MHI_STATE_SYS_ERR: 3401 DNPRINTF(QWX_D_MHI, 3402 "%s: new MHI state SYS ERR\n", 3403 sc->sc_dev.dv_xname); 3404 psc->mhi_state = mhi_state; 3405 break; 3406 default: 3407 printf("%s: unhandled MHI state change to %x\n", 3408 sc->sc_dev.dv_xname, mhi_state); 3409 break; 3410 } 3411 } 3412 3413 if (old_ee != psc->bhi_ee) 3414 wakeup(&psc->bhi_ee); 3415 if (old_mhi_state != psc->mhi_state) 3416 wakeup(&psc->mhi_state); 3417 3418 ns->tail = (ns->tail + 1) % nitems(ns->queue); 3419 ns->queued--; 3420 } 3421 } 3422 3423 void 3424 qwx_mhi_queue_state_change(struct qwx_pci_softc *psc, int ee, int mhi_state) 3425 { 3426 struct qwx_mhi_newstate *ns = &psc->mhi_newstate; 3427 3428 if (ns->queued >= nitems(ns->queue)) { 3429 printf("%s: event queue full, dropping event\n", __func__); 3430 return; 3431 } 3432 3433 ns->queue[ns->cur].ee = ee; 3434 ns->queue[ns->cur].mhi_state = mhi_state; 3435 ns->queued++; 3436 ns->cur = (ns->cur + 1) % nitems(ns->queue); 3437 task_add(systq, &psc->mhi_newstate_task); 3438 } 3439 3440 void 3441 qwx_pci_intr_ctrl_event_mhi(struct qwx_pci_softc *psc, uint32_t mhi_state) 3442 { 3443 DNPRINTF(QWX_D_MHI, "%s: MHI state change 0x%x -> 0x%x\n", __func__, 3444 psc->mhi_state, mhi_state); 3445 3446 qwx_mhi_queue_state_change(psc, -1, mhi_state); 3447 } 3448 3449 void 3450 qwx_pci_intr_ctrl_event_ee(struct qwx_pci_softc *psc, uint32_t ee) 3451 
{ 3452 DNPRINTF(QWX_D_MHI, "%s: EE change 0x%x to 0x%x\n", __func__, 3453 psc->bhi_ee, ee); 3454 3455 qwx_mhi_queue_state_change(psc, ee, -1); 3456 } 3457 3458 void 3459 qwx_pci_intr_ctrl_event_cmd_complete(struct qwx_pci_softc *psc, 3460 uint64_t ptr, uint32_t cmd_status) 3461 { 3462 struct qwx_pci_cmd_ring *cmd_ring = &psc->cmd_ring; 3463 uint64_t base = QWX_DMA_DVA(cmd_ring->dmamem); 3464 struct qwx_pci_xfer_ring *xfer_ring = NULL; 3465 struct qwx_mhi_ring_element *e; 3466 uint32_t tre1, chid; 3467 size_t i; 3468 3469 e = qwx_pci_cmd_ring_get_elem(cmd_ring, ptr); 3470 if (e == NULL) 3471 return; 3472 3473 tre1 = le32toh(e->dword[1]); 3474 chid = (tre1 & MHI_TRE1_EV_CHID_MASK) >> MHI_TRE1_EV_CHID_SHFT; 3475 3476 for (i = 0; i < nitems(psc->xfer_rings); i++) { 3477 if (psc->xfer_rings[i].mhi_chan_id == chid) { 3478 xfer_ring = &psc->xfer_rings[i]; 3479 break; 3480 } 3481 } 3482 if (xfer_ring == NULL) { 3483 printf("%s: no transfer ring found for command completion " 3484 "on channel %u\n", __func__, chid); 3485 return; 3486 } 3487 3488 xfer_ring->cmd_status = cmd_status; 3489 wakeup(&xfer_ring->cmd_status); 3490 3491 if (cmd_ring->rp + sizeof(*e) >= base + cmd_ring->size) 3492 cmd_ring->rp = base; 3493 else 3494 cmd_ring->rp += sizeof(*e); 3495 } 3496 3497 int 3498 qwx_pci_intr_ctrl_event(struct qwx_pci_softc *psc, struct qwx_pci_event_ring *ring) 3499 { 3500 struct qwx_softc *sc = &psc->sc_sc; 3501 struct qwx_mhi_event_ctxt *c; 3502 uint64_t rp, wp, base; 3503 struct qwx_mhi_ring_element *e; 3504 uint32_t tre0, tre1, type, code, chid, len; 3505 3506 c = ring->event_ctxt; 3507 if (c == NULL) { 3508 /* 3509 * Interrupts can trigger before mhi_init_event_rings() 3510 * if the device is still active after a warm reboot. 3511 */ 3512 return 0; 3513 } 3514 3515 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->event_ctxt), 0, 3516 QWX_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_POSTREAD); 3517 3518 rp = le64toh(c->rp); 3519 wp = le64toh(c->wp); 3520 3521 DNPRINTF(QWX_D_MHI, "%s: kernel rp=0x%llx\n", __func__, ring->rp); 3522 DNPRINTF(QWX_D_MHI, "%s: device rp=0x%llx\n", __func__, rp); 3523 DNPRINTF(QWX_D_MHI, "%s: kernel wp=0x%llx\n", __func__, ring->wp); 3524 DNPRINTF(QWX_D_MHI, "%s: device wp=0x%llx\n", __func__, wp); 3525 3526 base = QWX_DMA_DVA(ring->dmamem); 3527 if (ring->rp == rp || rp < base || rp >= base + ring->size) 3528 return 0; 3529 if (wp < base || wp >= base + ring->size) 3530 return 0; 3531 3532 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(ring->dmamem), 3533 0, QWX_DMA_LEN(ring->dmamem), BUS_DMASYNC_POSTREAD); 3534 3535 while (ring->rp != rp) { 3536 e = qwx_pci_event_ring_get_elem(ring, ring->rp); 3537 if (e == NULL) 3538 return 0; 3539 3540 tre0 = le32toh(e->dword[0]); 3541 tre1 = le32toh(e->dword[1]); 3542 3543 len = (tre0 & MHI_TRE0_EV_LEN_MASK) >> MHI_TRE0_EV_LEN_SHFT; 3544 code = (tre0 & MHI_TRE0_EV_CODE_MASK) >> MHI_TRE0_EV_CODE_SHFT; 3545 type = (tre1 & MHI_TRE1_EV_TYPE_MASK) >> MHI_TRE1_EV_TYPE_SHFT; 3546 chid = (tre1 & MHI_TRE1_EV_CHID_MASK) >> MHI_TRE1_EV_CHID_SHFT; 3547 DNPRINTF(QWX_D_MHI, "%s: len=%u code=0x%x type=0x%x chid=%d\n", 3548 __func__, len, code, type, chid); 3549 3550 switch (type) { 3551 case MHI_PKT_TYPE_STATE_CHANGE_EVENT: 3552 qwx_pci_intr_ctrl_event_mhi(psc, code); 3553 break; 3554 case MHI_PKT_TYPE_EE_EVENT: 3555 qwx_pci_intr_ctrl_event_ee(psc, code); 3556 break; 3557 case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: 3558 qwx_pci_intr_ctrl_event_cmd_complete(psc, 3559 le64toh(e->ptr), code); 3560 break; 3561 default: 3562 printf("%s: unhandled event type 0x%x\n", 3563 
__func__, type); 3564 break; 3565 } 3566 3567 if (ring->rp + sizeof(*e) >= base + ring->size) 3568 ring->rp = base; 3569 else 3570 ring->rp += sizeof(*e); 3571 3572 if (ring->wp + sizeof(*e) >= base + ring->size) 3573 ring->wp = base; 3574 else 3575 ring->wp += sizeof(*e); 3576 } 3577 3578 c->wp = htole64(ring->wp); 3579 3580 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->event_ctxt), 0, 3581 QWX_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_PREWRITE); 3582 3583 qwx_mhi_ring_doorbell(sc, ring->db_addr, ring->wp); 3584 return 1; 3585 } 3586 3587 void 3588 qwx_pci_intr_data_event_tx(struct qwx_pci_softc *psc, struct qwx_mhi_ring_element *e) 3589 { 3590 struct qwx_softc *sc = &psc->sc_sc; 3591 struct qwx_pci_xfer_ring *ring; 3592 struct qwx_xfer_data *xfer; 3593 uint64_t rp, evrp, base, paddr; 3594 uint32_t tre0, tre1, code, chid, evlen, len; 3595 int i; 3596 3597 tre0 = le32toh(e->dword[0]); 3598 tre1 = le32toh(e->dword[1]); 3599 3600 evlen = (tre0 & MHI_TRE0_EV_LEN_MASK) >> MHI_TRE0_EV_LEN_SHFT; 3601 code = (tre0 & MHI_TRE0_EV_CODE_MASK) >> MHI_TRE0_EV_CODE_SHFT; 3602 chid = (tre1 & MHI_TRE1_EV_CHID_MASK) >> MHI_TRE1_EV_CHID_SHFT; 3603 3604 switch (code) { 3605 case MHI_EV_CC_EOT: 3606 for (i = 0; i < nitems(psc->xfer_rings); i++) { 3607 ring = &psc->xfer_rings[i]; 3608 if (ring->mhi_chan_id == chid) 3609 break; 3610 } 3611 if (i == nitems(psc->xfer_rings)) { 3612 printf("%s: unhandled channel 0x%x\n", 3613 __func__, chid); 3614 break; 3615 } 3616 base = QWX_DMA_DVA(ring->dmamem); 3617 /* PTR contains the entry that was last written */ 3618 evrp = letoh64(e->ptr); 3619 rp = evrp; 3620 if (rp < base || rp >= base + ring->size) { 3621 printf("%s: invalid ptr 0x%llx\n", 3622 __func__, rp); 3623 break; 3624 } 3625 /* Point rp to next empty slot */ 3626 if (rp + sizeof(*e) >= base + ring->size) 3627 rp = base; 3628 else 3629 rp += sizeof(*e); 3630 /* Parse until next empty slot */ 3631 while (ring->rp != rp) { 3632 DNPRINTF(QWX_D_MHI, "%s:%d: ring->rp 0x%llx " 3633 "ring->wp 0x%llx rp 0x%llx\n", __func__, 3634 __LINE__, ring->rp, ring->wp, rp); 3635 e = qwx_pci_xfer_ring_get_elem(ring, ring->rp); 3636 xfer = qwx_pci_xfer_ring_get_data(ring, ring->rp); 3637 3638 if (ring->rp == evrp) 3639 len = evlen; 3640 else 3641 len = xfer->m->m_pkthdr.len; 3642 3643 bus_dmamap_sync(sc->sc_dmat, xfer->map, 0, 3644 xfer->m->m_pkthdr.len, BUS_DMASYNC_POSTREAD); 3645 #ifdef QWX_DEBUG 3646 { 3647 int i; 3648 DNPRINTF(QWX_D_MHI, "%s: chan %u data (len %u): ", 3649 __func__, 3650 ring->mhi_chan_id, len); 3651 for (i = 0; i < MIN(32, len); i++) { 3652 DNPRINTF(QWX_D_MHI, "%02x ", 3653 (unsigned char)mtod(xfer->m, caddr_t)[i]); 3654 } 3655 if (i < len) 3656 DNPRINTF(QWX_D_MHI, "..."); 3657 DNPRINTF(QWX_D_MHI, "\n"); 3658 } 3659 #endif 3660 if (ring->mhi_chan_direction == MHI_CHAN_TYPE_INBOUND) { 3661 /* Save m_data as upper layers use m_adj(9) */ 3662 void *o_data = xfer->m->m_data; 3663 3664 /* Pass mbuf to upper layers */ 3665 qwx_qrtr_recv_msg(sc, xfer->m); 3666 3667 /* Reset RX mbuf instead of free/alloc */ 3668 KASSERT(xfer->m->m_next == NULL); 3669 xfer->m->m_data = o_data; 3670 xfer->m->m_len = xfer->m->m_pkthdr.len = 3671 QWX_PCI_XFER_MAX_DATA_SIZE; 3672 3673 paddr = xfer->map->dm_segs[0].ds_addr; 3674 3675 e->ptr = htole64(paddr); 3676 e->dword[0] = htole32(( 3677 QWX_PCI_XFER_MAX_DATA_SIZE << 3678 MHI_TRE0_DATA_LEN_SHFT) & 3679 MHI_TRE0_DATA_LEN_MASK); 3680 e->dword[1] = htole32(MHI_TRE1_DATA_IEOT | 3681 MHI_TRE1_DATA_BEI | 3682 MHI_TRE1_DATA_TYPE_TRANSFER << 3683 MHI_TRE1_DATA_TYPE_SHIFT); 3684 3685 if (ring->wp + 
sizeof(*e) >= base + ring->size) 3686 ring->wp = base; 3687 else 3688 ring->wp += sizeof(*e); 3689 } else { 3690 /* Unload and free TX mbuf */ 3691 bus_dmamap_unload(sc->sc_dmat, xfer->map); 3692 m_freem(xfer->m); 3693 xfer->m = NULL; 3694 ring->queued--; 3695 } 3696 3697 if (ring->rp + sizeof(*e) >= base + ring->size) 3698 ring->rp = base; 3699 else 3700 ring->rp += sizeof(*e); 3701 } 3702 3703 if (ring->mhi_chan_direction == MHI_CHAN_TYPE_INBOUND) { 3704 ring->chan_ctxt->wp = htole64(ring->wp); 3705 3706 bus_dmamap_sync(sc->sc_dmat, 3707 QWX_DMA_MAP(psc->chan_ctxt), 0, 3708 QWX_DMA_LEN(psc->chan_ctxt), 3709 BUS_DMASYNC_PREWRITE); 3710 3711 qwx_mhi_ring_doorbell(sc, ring->db_addr, ring->wp); 3712 } 3713 break; 3714 default: 3715 printf("%s: unhandled event code 0x%x\n", 3716 __func__, code); 3717 } 3718 } 3719 3720 int 3721 qwx_pci_intr_data_event(struct qwx_pci_softc *psc, struct qwx_pci_event_ring *ring) 3722 { 3723 struct qwx_softc *sc = &psc->sc_sc; 3724 struct qwx_mhi_event_ctxt *c; 3725 uint64_t rp, wp, base; 3726 struct qwx_mhi_ring_element *e; 3727 uint32_t tre0, tre1, type, code, chid, len; 3728 3729 c = ring->event_ctxt; 3730 if (c == NULL) { 3731 /* 3732 * Interrupts can trigger before mhi_init_event_rings() 3733 * if the device is still active after a warm reboot. 3734 */ 3735 return 0; 3736 } 3737 3738 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->event_ctxt), 0, 3739 QWX_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_POSTREAD); 3740 3741 rp = le64toh(c->rp); 3742 wp = le64toh(c->wp); 3743 3744 DNPRINTF(QWX_D_MHI, "%s: kernel rp=0x%llx\n", __func__, ring->rp); 3745 DNPRINTF(QWX_D_MHI, "%s: device rp=0x%llx\n", __func__, rp); 3746 DNPRINTF(QWX_D_MHI, "%s: kernel wp=0x%llx\n", __func__, ring->wp); 3747 DNPRINTF(QWX_D_MHI, "%s: device wp=0x%llx\n", __func__, wp); 3748 3749 base = QWX_DMA_DVA(ring->dmamem); 3750 if (ring->rp == rp || rp < base || rp >= base + ring->size) 3751 return 0; 3752 3753 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(ring->dmamem), 3754 0, QWX_DMA_LEN(ring->dmamem), BUS_DMASYNC_POSTREAD); 3755 3756 while (ring->rp != rp) { 3757 e = qwx_pci_event_ring_get_elem(ring, ring->rp); 3758 if (e == NULL) 3759 return 0; 3760 3761 tre0 = le32toh(e->dword[0]); 3762 tre1 = le32toh(e->dword[1]); 3763 3764 len = (tre0 & MHI_TRE0_EV_LEN_MASK) >> MHI_TRE0_EV_LEN_SHFT; 3765 code = (tre0 & MHI_TRE0_EV_CODE_MASK) >> MHI_TRE0_EV_CODE_SHFT; 3766 type = (tre1 & MHI_TRE1_EV_TYPE_MASK) >> MHI_TRE1_EV_TYPE_SHFT; 3767 chid = (tre1 & MHI_TRE1_EV_CHID_MASK) >> MHI_TRE1_EV_CHID_SHFT; 3768 DNPRINTF(QWX_D_MHI, "%s: len=%u code=0x%x type=0x%x chid=%d\n", 3769 __func__, len, code, type, chid); 3770 3771 switch (type) { 3772 case MHI_PKT_TYPE_TX_EVENT: 3773 qwx_pci_intr_data_event_tx(psc, e); 3774 break; 3775 default: 3776 printf("%s: unhandled event type 0x%x\n", 3777 __func__, type); 3778 break; 3779 } 3780 3781 if (ring->rp + sizeof(*e) >= base + ring->size) 3782 ring->rp = base; 3783 else 3784 ring->rp += sizeof(*e); 3785 3786 if (ring->wp + sizeof(*e) >= base + ring->size) 3787 ring->wp = base; 3788 else 3789 ring->wp += sizeof(*e); 3790 } 3791 3792 c->wp = htole64(ring->wp); 3793 3794 bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->event_ctxt), 0, 3795 QWX_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_PREWRITE); 3796 3797 qwx_mhi_ring_doorbell(sc, ring->db_addr, ring->wp); 3798 return 1; 3799 } 3800 3801 int 3802 qwx_pci_intr(void *arg) 3803 { 3804 struct qwx_pci_softc *psc = arg; 3805 struct qwx_softc *sc = (void *)psc; 3806 uint32_t ee, state; 3807 int ret = 0; 3808 3809 /* 3810 * Interrupts can trigger 
before mhi_start() during boot if the device 3811 * is still active after a warm reboot. 3812 */ 3813 if (psc->bhi_off == 0) 3814 psc->bhi_off = qwx_pci_read(sc, MHI_BHI_OFFSET); 3815 3816 ee = qwx_pci_read(sc, psc->bhi_off + MHI_BHI_EXECENV); 3817 state = qwx_pci_read(sc, MHI_STATUS); 3818 state = (state & MHI_STATUS_MHISTATE_MASK) >> 3819 MHI_STATUS_MHISTATE_SHFT; 3820 3821 DNPRINTF(QWX_D_MHI, 3822 "%s: MHI interrupt with EE: 0x%x -> 0x%x state: 0x%x -> 0x%x\n", 3823 sc->sc_dev.dv_xname, psc->bhi_ee, ee, psc->mhi_state, state); 3824 3825 if (ee == MHI_EE_RDDM) { 3826 psc->bhi_ee = ee; 3827 if (!psc->rddm_triggered) { 3828 task_add(systq, &psc->rddm_task); 3829 psc->rddm_triggered = 1; 3830 } 3831 return 1; 3832 } else if (psc->bhi_ee == MHI_EE_PBL || psc->bhi_ee == MHI_EE_SBL) { 3833 int new_ee = -1, new_mhi_state = -1; 3834 3835 if (psc->bhi_ee != ee) 3836 new_ee = ee; 3837 if (state < MHI_STATE_M0 && psc->mhi_state != state) 3838 new_mhi_state = state; 3839 3840 if (new_ee != -1 || new_mhi_state != -1) 3841 qwx_mhi_queue_state_change(psc, new_ee, new_mhi_state); 3842 3843 /* Wake thread loading second stage bootloader. */ 3844 wakeup(&psc->bhi_off); 3845 ret = 1; 3846 } 3847 3848 if (qwx_pci_intr_ctrl_event(psc, &psc->event_rings[0])) 3849 ret = 1; 3850 if (qwx_pci_intr_data_event(psc, &psc->event_rings[1])) 3851 ret = 1; 3852 3853 if (qwx_intr(sc)) 3854 ret = 1; 3855 3856 return ret; 3857 } 3858