xref: /openbsd-src/sys/dev/pci/if_qwx_pci.c (revision dcc91c2622318df8f66a9bca2d2864253df1bfc3)
1 /*	$OpenBSD: if_qwx_pci.c,v 1.22 2024/07/06 05:34:35 patrick Exp $	*/
2 
3 /*
4  * Copyright 2023 Stefan Sperling <stsp@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc.
21  * Copyright (c) 2018-2021 The Linux Foundation.
22  * All rights reserved.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted (subject to the limitations in the disclaimer
26  * below) provided that the following conditions are met:
27  *
28  *  * Redistributions of source code must retain the above copyright notice,
29  *    this list of conditions and the following disclaimer.
30  *
31  *  * Redistributions in binary form must reproduce the above copyright
32  *    notice, this list of conditions and the following disclaimer in the
33  *    documentation and/or other materials provided with the distribution.
34  *
35  *  * Neither the name of [Owner Organization] nor the names of its
36  *    contributors may be used to endorse or promote products derived from
37  *    this software without specific prior written permission.
38  *
39  * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
40  * THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
41  * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
42  * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
43  * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
44  * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
45  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
46  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
47  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
48  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
49  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
50  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51  */
52 
53 #include "bpfilter.h"
54 
55 #include <sys/param.h>
56 #include <sys/mbuf.h>
57 #include <sys/lock.h>
58 #include <sys/socket.h>
59 #include <sys/systm.h>
60 #include <sys/malloc.h>
61 #include <sys/device.h>
62 #include <sys/endian.h>
63 
64 #include <machine/bus.h>
65 #include <machine/intr.h>
66 
67 #include <net/if.h>
68 #include <net/if_media.h>
69 
70 #include <netinet/in.h>
71 #include <netinet/if_ether.h>
72 
73 #include <net80211/ieee80211_var.h>
74 #include <net80211/ieee80211_radiotap.h>
75 
76 #include <dev/pci/pcireg.h>
77 #include <dev/pci/pcivar.h>
78 #include <dev/pci/pcidevs.h>
79 
80 /* XXX linux porting goo */
81 #ifdef __LP64__
82 #define BITS_PER_LONG		64
83 #else
84 #define BITS_PER_LONG		32
85 #endif
86 #define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
87 #define __bf_shf(x) (__builtin_ffsll(x) - 1)
88 #define FIELD_GET(_m, _v) ((typeof(_m))(((_v) & (_m)) >> __bf_shf(_m)))
89 #define BIT(x)               (1UL << (x))
90 #define test_bit(i, a)  ((a) & (1 << (i)))
91 #define clear_bit(i, a) ((a)) &= ~(1 << (i))
92 #define set_bit(i, a)   ((a)) |= (1 << (i))
93 
94 /* #define QWX_DEBUG */
95 
96 #include <dev/ic/qwxreg.h>
97 #include <dev/ic/qwxvar.h>
98 
99 #ifdef QWX_DEBUG
100 /* Headers needed for RDDM dump */
101 #include <sys/namei.h>
102 #include <sys/pledge.h>
103 #include <sys/vnode.h>
104 #include <sys/fcntl.h>
105 #include <sys/stat.h>
106 #include <sys/proc.h>
107 #endif
108 
109 #define ATH11K_PCI_IRQ_CE0_OFFSET	3
110 #define ATH11K_PCI_IRQ_DP_OFFSET	14
111 
112 #define ATH11K_PCI_CE_WAKE_IRQ		2
113 
114 #define ATH11K_PCI_WINDOW_ENABLE_BIT	0x40000000
115 #define ATH11K_PCI_WINDOW_REG_ADDRESS	0x310c
116 #define ATH11K_PCI_WINDOW_VALUE_MASK	GENMASK(24, 19)
117 #define ATH11K_PCI_WINDOW_START		0x80000
118 #define ATH11K_PCI_WINDOW_RANGE_MASK	GENMASK(18, 0)
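/*
 * Registers at offsets >= ATH11K_PCI_WINDOW_START are reached through a
 * sliding window: bits 24:19 of the full offset select the window
 * (ATH11K_PCI_WINDOW_VALUE_MASK) and bits 18:0 index into it
 * (ATH11K_PCI_WINDOW_RANGE_MASK).  For example, WLAON_WARM_SW_ENTRY
 * (0x01f80504) selects window 0x3f and is then accessed at BAR offset
 * ATH11K_PCI_WINDOW_START + 0x504; see qwx_pci_select_window().
 */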
119 
120 /* The first 4K of BAR0 is always accessible; no forced wakeup is needed. */
121 #define ATH11K_PCI_ACCESS_ALWAYS_OFF	0xFE0	/* 4K - 32 = 0xFE0 */
122 
123 #define TCSR_SOC_HW_VERSION		0x0224
124 #define TCSR_SOC_HW_VERSION_MAJOR_MASK	GENMASK(11, 8)
125 #define TCSR_SOC_HW_VERSION_MINOR_MASK	GENMASK(7, 0)
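/*
 * qwx_pci_read_hw_version() extracts these fields with FIELD_GET(), e.g.
 * FIELD_GET(GENMASK(11, 8), 0x0200) yields major version 2, which
 * qwx_pci_attach() maps to one of the *_HW20/*_HW21 revisions.
 */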
126 
127 /*
128  * pci.h
129  */
130 #define PCIE_SOC_GLOBAL_RESET			0x3008
131 #define PCIE_SOC_GLOBAL_RESET_V			1
132 
133 #define WLAON_WARM_SW_ENTRY			0x1f80504
134 #define WLAON_SOC_RESET_CAUSE_REG		0x01f8060c
135 
136 #define PCIE_Q6_COOKIE_ADDR			0x01f80500
137 #define PCIE_Q6_COOKIE_DATA			0xc0000000
138 
139 /* register to wake the UMAC from power collapse */
140 #define PCIE_SCRATCH_0_SOC_PCIE_REG		0x4040
141 
142 /* register used for handshake mechanism to validate UMAC is awake */
143 #define PCIE_SOC_WAKE_PCIE_LOCAL_REG		0x3004
144 
145 #define PCIE_PCIE_PARF_LTSSM			0x1e081b0
146 #define PARM_LTSSM_VALUE			0x111
147 
148 #define GCC_GCC_PCIE_HOT_RST			0x1e402bc
149 #define GCC_GCC_PCIE_HOT_RST_VAL		0x10
150 
151 #define PCIE_PCIE_INT_ALL_CLEAR			0x1e08228
152 #define PCIE_SMLH_REQ_RST_LINK_DOWN		0x2
153 #define PCIE_INT_CLEAR_ALL			0xffffffff
154 
155 #define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(sc) \
156 		(sc->hw_params.regs->pcie_qserdes_sysclk_en_sel)
157 #define PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL	0x10
158 #define PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK	0xffffffff
159 #define PCIE_PCS_OSC_DTCT_CONFIG1_REG(sc) \
160 		(sc->hw_params.regs->pcie_pcs_osc_dtct_config_base)
161 #define PCIE_PCS_OSC_DTCT_CONFIG1_VAL		0x02
162 #define PCIE_PCS_OSC_DTCT_CONFIG2_REG(sc) \
163 		(sc->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0x4)
164 #define PCIE_PCS_OSC_DTCT_CONFIG2_VAL		0x52
165 #define PCIE_PCS_OSC_DTCT_CONFIG4_REG(sc) \
166 		(sc->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0xc)
167 #define PCIE_PCS_OSC_DTCT_CONFIG4_VAL		0xff
168 #define PCIE_PCS_OSC_DTCT_CONFIG_MSK		0x000000ff
169 
170 #define WLAON_QFPROM_PWR_CTRL_REG		0x01f8031c
171 #define QFPROM_PWR_CTRL_VDD4BLOW_MASK		0x4
172 
173 /*
174  * mhi.h
175  */
176 #define PCIE_TXVECDB				0x360
177 #define PCIE_TXVECSTATUS			0x368
178 #define PCIE_RXVECDB				0x394
179 #define PCIE_RXVECSTATUS			0x39C
180 
181 #define MHI_CHAN_CTX_CHSTATE_MASK		GENMASK(7, 0)
182 #define   MHI_CHAN_CTX_CHSTATE_DISABLED		0
183 #define   MHI_CHAN_CTX_CHSTATE_ENABLED		1
184 #define   MHI_CHAN_CTX_CHSTATE_RUNNING		2
185 #define   MHI_CHAN_CTX_CHSTATE_SUSPENDED	3
186 #define   MHI_CHAN_CTX_CHSTATE_STOP		4
187 #define   MHI_CHAN_CTX_CHSTATE_ERROR		5
188 #define MHI_CHAN_CTX_BRSTMODE_MASK		GENMASK(9, 8)
189 #define MHI_CHAN_CTX_BRSTMODE_SHFT		8
190 #define   MHI_CHAN_CTX_BRSTMODE_DISABLE		2
191 #define   MHI_CHAN_CTX_BRSTMODE_ENABLE		3
192 #define MHI_CHAN_CTX_POLLCFG_MASK		GENMASK(15, 10)
193 #define MHI_CHAN_CTX_RESERVED_MASK		GENMASK(31, 16)
194 
195 #define QWX_MHI_CONFIG_QCA6390_MAX_CHANNELS	128
196 #define QWX_MHI_CONFIG_QCA6390_TIMEOUT_MS	2000
197 #define QWX_MHI_CONFIG_QCA9074_MAX_CHANNELS	30
198 
199 #define MHI_CHAN_TYPE_INVALID		0
200 #define MHI_CHAN_TYPE_OUTBOUND		1 /* to device */
201 #define MHI_CHAN_TYPE_INBOUND		2 /* from device */
202 #define MHI_CHAN_TYPE_INBOUND_COALESCED	3
203 
204 #define MHI_EV_CTX_RESERVED_MASK	GENMASK(7, 0)
205 #define MHI_EV_CTX_INTMODC_MASK		GENMASK(15, 8)
206 #define MHI_EV_CTX_INTMODT_MASK		GENMASK(31, 16)
207 #define MHI_EV_CTX_INTMODT_SHFT		16
208 
209 #define MHI_ER_TYPE_INVALID	0
210 #define MHI_ER_TYPE_VALID	1
211 
212 #define MHI_ER_DATA	0
213 #define MHI_ER_CTRL	1
214 
215 #define MHI_CH_STATE_DISABLED	0
216 #define MHI_CH_STATE_ENABLED	1
217 #define MHI_CH_STATE_RUNNING	2
218 #define MHI_CH_STATE_SUSPENDED	3
219 #define MHI_CH_STATE_STOP	4
220 #define MHI_CH_STATE_ERROR	5
221 
222 #define QWX_NUM_EVENT_CTX	2
223 
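/*
 * MHI context structures shared with the device via DMA.  In each
 * context, rbase and rlen describe the ring's bus address and size in
 * bytes, while rp and wp hold the current read and write pointers.
 */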
224 /* Event context. Shared with device. */
225 struct qwx_mhi_event_ctxt {
226 	uint32_t intmod;
227 	uint32_t ertype;
228 	uint32_t msivec;
229 
230 	uint64_t rbase;
231 	uint64_t rlen;
232 	uint64_t rp;
233 	uint64_t wp;
234 } __packed;
235 
236 /* Channel context. Shared with device. */
237 struct qwx_mhi_chan_ctxt {
238 	uint32_t chcfg;
239 	uint32_t chtype;
240 	uint32_t erindex;
241 
242 	uint64_t rbase;
243 	uint64_t rlen;
244 	uint64_t rp;
245 	uint64_t wp;
246 } __packed;
247 
248 /* Command context. Shared with device. */
249 struct qwx_mhi_cmd_ctxt {
250 	uint32_t reserved0;
251 	uint32_t reserved1;
252 	uint32_t reserved2;
253 
254 	uint64_t rbase;
255 	uint64_t rlen;
256 	uint64_t rp;
257 	uint64_t wp;
258 } __packed;
259 
260 struct qwx_mhi_ring_element {
261 	uint64_t ptr;
262 	uint32_t dword[2];
263 };
264 
265 struct qwx_xfer_data {
266 	bus_dmamap_t	map;
267 	struct mbuf	*m;
268 };
269 
270 #define QWX_PCI_XFER_MAX_DATA_SIZE	0xffff
271 #define QWX_PCI_XFER_RING_MAX_ELEMENTS	64
272 
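/*
 * Host-side state of one MHI transfer (channel) ring.  The ring elements
 * live in ring-size-aligned DMA memory (dmamem); the data[] array tracks
 * the mbuf and DMA map backing each element, and chan_ctxt points at this
 * channel's entry in the shared channel context array.
 */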
273 struct qwx_pci_xfer_ring {
274 	struct qwx_dmamem	*dmamem;
275 	bus_size_t		size;
276 	uint32_t		mhi_chan_id;
277 	uint32_t		mhi_chan_state;
278 	uint32_t		mhi_chan_direction;
279 	uint32_t		mhi_chan_event_ring_index;
280 	uint32_t		db_addr;
281 	uint32_t		cmd_status;
282 	int			num_elements;
283 	int			queued;
284 	struct qwx_xfer_data	data[QWX_PCI_XFER_RING_MAX_ELEMENTS];
285 	uint64_t		rp;
286 	uint64_t		wp;
287 	struct qwx_mhi_chan_ctxt *chan_ctxt;
288 };
289 
290 
291 #define QWX_PCI_EVENT_RING_MAX_ELEMENTS	256
292 
293 struct qwx_pci_event_ring {
294 	struct qwx_dmamem	*dmamem;
295 	bus_size_t		size;
296 	uint32_t		mhi_er_type;
297 	uint32_t		mhi_er_irq;
298 	uint32_t		mhi_er_irq_moderation_ms;
299 	uint32_t		db_addr;
300 	int			num_elements;
301 	uint64_t		rp;
302 	uint64_t		wp;
303 	struct qwx_mhi_event_ctxt *event_ctxt;
304 };
305 
306 struct qwx_cmd_data {
307 	bus_dmamap_t	map;
308 	struct mbuf	*m;
309 };
310 
311 #define QWX_PCI_CMD_RING_MAX_ELEMENTS	128
312 
313 struct qwx_pci_cmd_ring {
314 	struct qwx_dmamem	*dmamem;
315 	bus_size_t		size;
316 	uint64_t		rp;
317 	uint64_t		wp;
318 	int			num_elements;
319 	int			queued;
320 };
321 
322 struct qwx_pci_ops;
323 struct qwx_msi_config;
324 
325 #define QWX_NUM_MSI_VEC	32
326 
327 struct qwx_pci_softc {
328 	struct qwx_softc	sc_sc;
329 	pci_chipset_tag_t	sc_pc;
330 	pcitag_t		sc_tag;
331 	int			sc_cap_off;
332 	int			sc_msi_off;
333 	pcireg_t		sc_msi_cap;
334 	void			*sc_ih[QWX_NUM_MSI_VEC];
335 	char			sc_ivname[QWX_NUM_MSI_VEC][16];
336 	struct qwx_ext_irq_grp	ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX];
337 	int			mhi_irq[2];
338 	bus_space_tag_t		sc_st;
339 	bus_space_handle_t	sc_sh;
340 	bus_addr_t		sc_map;
341 	bus_size_t		sc_mapsize;
342 
343 	pcireg_t		sc_lcsr;
344 	uint32_t		sc_flags;
345 #define ATH11K_PCI_ASPM_RESTORE	1
346 
347 	uint32_t		register_window;
348 	const struct qwx_pci_ops *sc_pci_ops;
349 
350 	uint32_t		 bhi_off;
351 	uint32_t		 bhi_ee;
352 	uint32_t		 bhie_off;
353 	uint32_t		 mhi_state;
354 	uint32_t		 max_chan;
355 
356 	uint64_t		 wake_db;
357 
358 	/*
359 	 * DMA memory for AMSS.bin firmware image.
360 	 * This memory must remain available to the device until
361 	 * the device is powered down.
362 	 */
363 	struct qwx_dmamem	*amss_data;
364 	struct qwx_dmamem	*amss_vec;
365 
366 	struct qwx_dmamem	 *rddm_vec;
367 	struct qwx_dmamem	 *rddm_data;
368 	int			 rddm_triggered;
369 	struct task		 rddm_task;
370 #define	QWX_RDDM_DUMP_SIZE	0x420000
371 
372 	struct qwx_dmamem	*chan_ctxt;
373 	struct qwx_dmamem	*event_ctxt;
374 	struct qwx_dmamem	*cmd_ctxt;
375 
376 
377 	struct qwx_pci_xfer_ring xfer_rings[4];
378 #define QWX_PCI_XFER_RING_LOOPBACK_OUTBOUND	0
379 #define QWX_PCI_XFER_RING_LOOPBACK_INBOUND	1
380 #define QWX_PCI_XFER_RING_IPCR_OUTBOUND		2
381 #define QWX_PCI_XFER_RING_IPCR_INBOUND		3
382 	struct qwx_pci_event_ring event_rings[QWX_NUM_EVENT_CTX];
383 	struct qwx_pci_cmd_ring cmd_ring;
384 };
385 
386 int	qwx_pci_match(struct device *, void *, void *);
387 void	qwx_pci_attach(struct device *, struct device *, void *);
388 int	qwx_pci_detach(struct device *, int);
389 void	qwx_pci_attach_hook(struct device *);
390 void	qwx_pci_free_xfer_rings(struct qwx_pci_softc *);
391 int	qwx_pci_alloc_xfer_ring(struct qwx_softc *, struct qwx_pci_xfer_ring *,
392 	    uint32_t, uint32_t, uint32_t, size_t);
393 int	qwx_pci_alloc_xfer_rings_qca6390(struct qwx_pci_softc *);
394 int	qwx_pci_alloc_xfer_rings_qcn9074(struct qwx_pci_softc *);
395 void	qwx_pci_free_event_rings(struct qwx_pci_softc *);
396 int	qwx_pci_alloc_event_ring(struct qwx_softc *,
397 	    struct qwx_pci_event_ring *, uint32_t, uint32_t, uint32_t, size_t);
398 int	qwx_pci_alloc_event_rings(struct qwx_pci_softc *);
399 void	qwx_pci_free_cmd_ring(struct qwx_pci_softc *);
400 int	qwx_pci_init_cmd_ring(struct qwx_softc *, struct qwx_pci_cmd_ring *);
401 uint32_t qwx_pci_read(struct qwx_softc *, uint32_t);
402 void	qwx_pci_write(struct qwx_softc *, uint32_t, uint32_t);
403 
404 void	qwx_pci_read_hw_version(struct qwx_softc *, uint32_t *, uint32_t *);
405 uint32_t qwx_pcic_read32(struct qwx_softc *, uint32_t);
406 void	 qwx_pcic_write32(struct qwx_softc *, uint32_t, uint32_t);
407 
408 void	qwx_pcic_ext_irq_enable(struct qwx_softc *);
409 void	qwx_pcic_ext_irq_disable(struct qwx_softc *);
410 int	qwx_pcic_config_irq(struct qwx_softc *, struct pci_attach_args *);
411 
412 int	qwx_pci_start(struct qwx_softc *);
413 void	qwx_pci_stop(struct qwx_softc *);
414 void	qwx_pci_aspm_disable(struct qwx_softc *);
415 void	qwx_pci_aspm_restore(struct qwx_softc *);
416 int	qwx_pci_power_up(struct qwx_softc *);
417 void	qwx_pci_power_down(struct qwx_softc *);
418 
419 int	qwx_pci_bus_wake_up(struct qwx_softc *);
420 void	qwx_pci_bus_release(struct qwx_softc *);
421 void	qwx_pci_window_write32(struct qwx_softc *, uint32_t, uint32_t);
422 uint32_t qwx_pci_window_read32(struct qwx_softc *, uint32_t);
423 
424 int	qwx_mhi_register(struct qwx_softc *);
425 void	qwx_mhi_unregister(struct qwx_softc *);
426 void	qwx_mhi_ring_doorbell(struct qwx_softc *sc, uint64_t, uint64_t);
427 void	qwx_mhi_device_wake(struct qwx_softc *);
428 void	qwx_mhi_device_zzz(struct qwx_softc *);
429 int	qwx_mhi_wake_db_clear_valid(struct qwx_softc *);
430 void	qwx_mhi_init_xfer_rings(struct qwx_pci_softc *);
431 void	qwx_mhi_init_event_rings(struct qwx_pci_softc *);
432 void	qwx_mhi_init_cmd_ring(struct qwx_pci_softc *);
433 void	qwx_mhi_init_dev_ctxt(struct qwx_pci_softc *);
434 int	qwx_mhi_send_cmd(struct qwx_pci_softc *psc, uint32_t, uint32_t);
435 void *	qwx_pci_xfer_ring_get_elem(struct qwx_pci_xfer_ring *, uint64_t);
436 struct qwx_xfer_data *qwx_pci_xfer_ring_get_data(struct qwx_pci_xfer_ring *,
437 	    uint64_t);
438 int	qwx_mhi_submit_xfer(struct qwx_softc *sc, struct mbuf *m);
439 int	qwx_mhi_start_channel(struct qwx_pci_softc *,
440 	    struct qwx_pci_xfer_ring *);
441 int	qwx_mhi_start_channels(struct qwx_pci_softc *);
442 int	qwx_mhi_start(struct qwx_pci_softc *);
443 void	qwx_mhi_stop(struct qwx_softc *);
444 int	qwx_mhi_reset_device(struct qwx_softc *, int);
445 void	qwx_mhi_clear_vector(struct qwx_softc *);
446 int	qwx_mhi_fw_load_handler(struct qwx_pci_softc *);
447 int	qwx_mhi_await_device_reset(struct qwx_softc *);
448 int	qwx_mhi_await_device_ready(struct qwx_softc *);
449 void	qwx_mhi_ready_state_transition(struct qwx_pci_softc *);
450 void	qwx_mhi_mission_mode_state_transition(struct qwx_pci_softc *);
451 void	qwx_mhi_low_power_mode_state_transition(struct qwx_pci_softc *);
452 void	qwx_mhi_set_state(struct qwx_softc *, uint32_t);
453 void	qwx_mhi_init_mmio(struct qwx_pci_softc *);
454 int	qwx_mhi_fw_load_bhi(struct qwx_pci_softc *, uint8_t *, size_t);
455 int	qwx_mhi_fw_load_bhie(struct qwx_pci_softc *, uint8_t *, size_t);
456 void	qwx_rddm_prepare(struct qwx_pci_softc *);
457 #ifdef QWX_DEBUG
458 void	qwx_rddm_task(void *);
459 #endif
460 void *	qwx_pci_event_ring_get_elem(struct qwx_pci_event_ring *, uint64_t);
461 void	qwx_pci_intr_ctrl_event_mhi(struct qwx_pci_softc *, uint32_t);
462 void	qwx_pci_intr_ctrl_event_ee(struct qwx_pci_softc *, uint32_t);
463 void	qwx_pci_intr_ctrl_event_cmd_complete(struct qwx_pci_softc *,
464 	    uint64_t, uint32_t);
465 int	qwx_pci_intr_ctrl_event(struct qwx_pci_softc *,
466 	    struct qwx_pci_event_ring *);
467 void	qwx_pci_intr_data_event_tx(struct qwx_pci_softc *,
468 	    struct qwx_mhi_ring_element *);
469 int	qwx_pci_intr_data_event(struct qwx_pci_softc *,
470 	    struct qwx_pci_event_ring *);
471 int	qwx_pci_intr_mhi_ctrl(void *);
472 int	qwx_pci_intr_mhi_data(void *);
473 int	qwx_pci_intr(void *);
474 
475 struct qwx_pci_ops {
476 	int	 (*wakeup)(struct qwx_softc *);
477 	void	 (*release)(struct qwx_softc *);
478 	int	 (*get_msi_irq)(struct qwx_softc *, unsigned int);
479 	void	 (*window_write32)(struct qwx_softc *, uint32_t, uint32_t);
480 	uint32_t (*window_read32)(struct qwx_softc *, uint32_t);
481 	int	 (*alloc_xfer_rings)(struct qwx_pci_softc *);
482 };
483 
484 
485 static const struct qwx_pci_ops qwx_pci_ops_qca6390 = {
486 	.wakeup = qwx_pci_bus_wake_up,
487 	.release = qwx_pci_bus_release,
488 #if notyet
489 	.get_msi_irq = qwx_pci_get_msi_irq,
490 #endif
491 	.window_write32 = qwx_pci_window_write32,
492 	.window_read32 = qwx_pci_window_read32,
493 	.alloc_xfer_rings = qwx_pci_alloc_xfer_rings_qca6390,
494 };
495 
496 static const struct qwx_pci_ops qwx_pci_ops_qcn9074 = {
497 	.wakeup = NULL,
498 	.release = NULL,
499 #if notyet
500 	.get_msi_irq = qwx_pci_get_msi_irq,
501 #endif
502 	.window_write32 = qwx_pci_window_write32,
503 	.window_read32 = qwx_pci_window_read32,
504 	.alloc_xfer_rings = qwx_pci_alloc_xfer_rings_qcn9074,
505 };
506 
507 const struct cfattach qwx_pci_ca = {
508 	sizeof(struct qwx_pci_softc),
509 	qwx_pci_match,
510 	qwx_pci_attach,
511 	qwx_pci_detach,
512 	qwx_activate
513 };
514 
515 /* XXX pcidev */
516 #define PCI_PRODUCT_QUALCOMM_QCA6390	0x1101
517 #define PCI_PRODUCT_QUALCOMM_QCN9074	0x1104
518 
519 static const struct pci_matchid qwx_pci_devices[] = {
520 #if notyet
521 	{ PCI_VENDOR_QUALCOMM, PCI_PRODUCT_QUALCOMM_QCA6390 },
522 	{ PCI_VENDOR_QUALCOMM, PCI_PRODUCT_QUALCOMM_QCN9074 },
523 #endif
524 	{ PCI_VENDOR_QUALCOMM, PCI_PRODUCT_QUALCOMM_QCNFA765 }
525 };
526 
527 int
528 qwx_pci_match(struct device *parent, void *match, void *aux)
529 {
530 	return pci_matchbyid(aux, qwx_pci_devices, nitems(qwx_pci_devices));
531 }
532 
533 void
534 qwx_pci_init_qmi_ce_config(struct qwx_softc *sc)
535 {
536 	struct qwx_qmi_ce_cfg *cfg = &sc->qmi_ce_cfg;
537 
538 	qwx_ce_get_shadow_config(sc, &cfg->shadow_reg_v2,
539 	    &cfg->shadow_reg_v2_len);
540 }
541 
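/*
 * Fallback MSI layout used when only a single MSI vector is available.
 * All users (MHI, CE, WAKE, DP) share vector 0.
 */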
542 const struct qwx_msi_config qwx_msi_config_one_msi = {
543 	.total_vectors = 1,
544 	.total_users = 4,
545 	.users = (struct qwx_msi_user[]) {
546 		{ .name = "MHI", .num_vectors = 1, .base_vector = 0 },
547 		{ .name = "CE", .num_vectors = 1, .base_vector = 0 },
548 		{ .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
549 		{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
550 	},
551 };
552 
553 const struct qwx_msi_config qwx_msi_config[] = {
554 	{
555 		.total_vectors = 32,
556 		.total_users = 4,
557 		.users = (struct qwx_msi_user[]) {
558 			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
559 			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
560 			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
561 			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
562 		},
563 		.hw_rev = ATH11K_HW_QCA6390_HW20,
564 	},
565 	{
566 		.total_vectors = 16,
567 		.total_users = 3,
568 		.users = (struct qwx_msi_user[]) {
569 			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
570 			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
571 			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
572 		},
573 		.hw_rev = ATH11K_HW_QCN9074_HW10,
574 	},
575 	{
576 		.total_vectors = 32,
577 		.total_users = 4,
578 		.users = (struct qwx_msi_user[]) {
579 			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
580 			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
581 			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
582 			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
583 		},
584 		.hw_rev = ATH11K_HW_WCN6855_HW20,
585 	},
586 	{
587 		.total_vectors = 32,
588 		.total_users = 4,
589 		.users = (struct qwx_msi_user[]) {
590 			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
591 			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
592 			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
593 			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
594 		},
595 		.hw_rev = ATH11K_HW_WCN6855_HW21,
596 	},
597 	{
598 		.total_vectors = 28,
599 		.total_users = 2,
600 		.users = (struct qwx_msi_user[]) {
601 			{ .name = "CE", .num_vectors = 10, .base_vector = 0 },
602 			{ .name = "DP", .num_vectors = 18, .base_vector = 10 },
603 		},
604 		.hw_rev = ATH11K_HW_WCN6750_HW10,
605 	},
606 };
607 
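/*
 * Select the MSI vector layout matching this chip revision, or fall back
 * to the single-vector layout if multiple MSI vectors could not be
 * allocated.
 */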
608 int
609 qwx_pcic_init_msi_config(struct qwx_softc *sc)
610 {
611 	const struct qwx_msi_config *msi_config;
612 	int i;
613 
614 	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) {
615 		sc->msi_cfg = &qwx_msi_config_one_msi;
616 		return 0;
617 	}
618 	for (i = 0; i < nitems(qwx_msi_config); i++) {
619 		msi_config = &qwx_msi_config[i];
620 
621 		if (msi_config->hw_rev == sc->sc_hw_rev)
622 			break;
623 	}
624 
625 	if (i == nitems(qwx_msi_config)) {
626 		printf("%s: failed to fetch msi config, "
627 		    "unsupported hw version: 0x%x\n",
628 		    sc->sc_dev.dv_xname, sc->sc_hw_rev);
629 		return EINVAL;
630 	}
631 
632 	sc->msi_cfg = msi_config;
633 	return 0;
634 }
635 
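/*
 * Read back the MSI address and data programmed into the device's MSI
 * capability.  The driver later hands these values to the hardware
 * (e.g. when setting up srngs) so the device can raise an MSI by writing
 * the data value to the message address.
 */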
636 int
637 qwx_pci_alloc_msi(struct qwx_softc *sc)
638 {
639 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
640 	uint64_t addr;
641 	pcireg_t data;
642 
643 	if (psc->sc_msi_cap & PCI_MSI_MC_C64) {
644 		uint64_t addr_hi;
645 		pcireg_t addr_lo;
646 
647 		addr_lo = pci_conf_read(psc->sc_pc, psc->sc_tag,
648 		    psc->sc_msi_off + PCI_MSI_MA);
649 		addr_hi = pci_conf_read(psc->sc_pc, psc->sc_tag,
650 		    psc->sc_msi_off + PCI_MSI_MAU32);
651 		addr = addr_hi << 32 | addr_lo;
652 		data = pci_conf_read(psc->sc_pc, psc->sc_tag,
653 		    psc->sc_msi_off + PCI_MSI_MD64);
654 	} else {
655 		addr = pci_conf_read(psc->sc_pc, psc->sc_tag,
656 		    psc->sc_msi_off + PCI_MSI_MA);
657 		data = pci_conf_read(psc->sc_pc, psc->sc_tag,
658 		    psc->sc_msi_off + PCI_MSI_MD32);
659 	}
660 
661 	sc->msi_addr_lo = addr & 0xffffffff;
662 	sc->msi_addr_hi = ((uint64_t)addr) >> 32;
663 	sc->msi_data_start = data;
664 
665 	DPRINTF("%s: MSI addr: 0x%llx MSI data: 0x%x\n", sc->sc_dev.dv_xname,
666 	    addr, data);
667 
668 	return 0;
669 }
670 
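/*
 * Translate an HTC service ID into uplink (host to device) and downlink
 * (device to host) copy engine pipe numbers using the service-to-CE map
 * from hw_params.  A PIPEDIR_INOUT entry sets both directions.
 */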
671 int
672 qwx_pcic_map_service_to_pipe(struct qwx_softc *sc, uint16_t service_id,
673     uint8_t *ul_pipe, uint8_t *dl_pipe)
674 {
675 	const struct service_to_pipe *entry;
676 	int ul_set = 0, dl_set = 0;
677 	int i;
678 
679 	for (i = 0; i < sc->hw_params.svc_to_ce_map_len; i++) {
680 		entry = &sc->hw_params.svc_to_ce_map[i];
681 
682 		if (le32toh(entry->service_id) != service_id)
683 			continue;
684 
685 		switch (le32toh(entry->pipedir)) {
686 		case PIPEDIR_NONE:
687 			break;
688 		case PIPEDIR_IN:
689 			*dl_pipe = le32toh(entry->pipenum);
690 			dl_set = 1;
691 			break;
692 		case PIPEDIR_OUT:
693 			*ul_pipe = le32toh(entry->pipenum);
694 			ul_set = 1;
695 			break;
696 		case PIPEDIR_INOUT:
697 			*dl_pipe = le32toh(entry->pipenum);
698 			*ul_pipe = le32toh(entry->pipenum);
699 			dl_set = 1;
700 			ul_set = 1;
701 			break;
702 		}
703 	}
704 
705 	if (!ul_set || !dl_set) {
706 		DPRINTF("%s: no uplink or downlink pipe found\n", __func__);
707 		return ENOENT;
708 	}
709 
710 	return 0;
711 }
712 
713 int
714 qwx_pcic_get_user_msi_vector(struct qwx_softc *sc, char *user_name,
715     int *num_vectors, uint32_t *user_base_data, uint32_t *base_vector)
716 {
717 	const struct qwx_msi_config *msi_config = sc->msi_cfg;
718 	int idx;
719 
720 	for (idx = 0; idx < msi_config->total_users; idx++) {
721 		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
722 			*num_vectors = msi_config->users[idx].num_vectors;
723 			*base_vector =  msi_config->users[idx].base_vector;
724 			*user_base_data = *base_vector + sc->msi_data_start;
725 
726 			DPRINTF("%s: MSI assignment %s num_vectors %d "
727 			    "user_base_data %u base_vector %u\n", __func__,
728 			    user_name, *num_vectors, *user_base_data,
729 			    *base_vector);
730 			return 0;
731 		}
732 	}
733 
734 	DPRINTF("%s: Failed to find MSI assignment for %s\n",
735 	    sc->sc_dev.dv_xname, user_name);
736 
737 	return EINVAL;
738 }
739 
740 void
741 qwx_pci_attach(struct device *parent, struct device *self, void *aux)
742 {
743 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)self;
744 	struct qwx_softc *sc = &psc->sc_sc;
745 	struct ieee80211com *ic = &sc->sc_ic;
746 	struct ifnet *ifp = &ic->ic_if;
747 	uint32_t soc_hw_version_major, soc_hw_version_minor;
748 	const struct qwx_pci_ops *pci_ops;
749 	struct pci_attach_args *pa = aux;
750 	pci_intr_handle_t ih;
751 	pcireg_t memtype, reg;
752 	const char *intrstr;
753 	int error;
754 	pcireg_t sreg;
755 
756 	sc->sc_dmat = pa->pa_dmat;
757 	psc->sc_pc = pa->pa_pc;
758 	psc->sc_tag = pa->pa_tag;
759 
760 #ifdef __HAVE_FDT
761 	sc->sc_node = PCITAG_NODE(pa->pa_tag);
762 #endif
763 
764 	rw_init(&sc->ioctl_rwl, "qwxioctl");
765 
766 	sreg = pci_conf_read(psc->sc_pc, psc->sc_tag, PCI_SUBSYS_ID_REG);
767 	sc->id.bdf_search = ATH11K_BDF_SEARCH_DEFAULT;
768 	sc->id.vendor = PCI_VENDOR(pa->pa_id);
769 	sc->id.device = PCI_PRODUCT(pa->pa_id);
770 	sc->id.subsystem_vendor = PCI_VENDOR(sreg);
771 	sc->id.subsystem_device = PCI_PRODUCT(sreg);
772 
773 	strlcpy(sc->sc_bus_str, "pci", sizeof(sc->sc_bus_str));
774 
775 	sc->ops.read32 = qwx_pcic_read32;
776 	sc->ops.write32 = qwx_pcic_write32;
777 	sc->ops.start = qwx_pci_start;
778 	sc->ops.stop = qwx_pci_stop;
779 	sc->ops.power_up = qwx_pci_power_up;
780 	sc->ops.power_down = qwx_pci_power_down;
781 	sc->ops.submit_xfer = qwx_mhi_submit_xfer;
782 	sc->ops.irq_enable = qwx_pcic_ext_irq_enable;
783 	sc->ops.irq_disable = qwx_pcic_ext_irq_disable;
784 	sc->ops.map_service_to_pipe = qwx_pcic_map_service_to_pipe;
785 	sc->ops.get_user_msi_vector = qwx_pcic_get_user_msi_vector;
786 
787 	if (pci_get_capability(psc->sc_pc, psc->sc_tag, PCI_CAP_PCIEXPRESS,
788 	    &psc->sc_cap_off, NULL) == 0) {
789 		printf(": can't find PCIe capability structure\n");
790 		return;
791 	}
792 
793 	if (pci_get_capability(psc->sc_pc, psc->sc_tag, PCI_CAP_MSI,
794 	    &psc->sc_msi_off, &psc->sc_msi_cap) == 0) {
795 		printf(": can't find MSI capability structure\n");
796 		return;
797 	}
798 
799 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
800 	reg |= PCI_COMMAND_MASTER_ENABLE;
801 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, reg);
802 
803 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
804 	if (pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
805 	    &psc->sc_st, &psc->sc_sh, &psc->sc_map, &psc->sc_mapsize, 0)) {
806 		printf(": can't map mem space\n");
807 		return;
808 	}
809 
810 	sc->mem = psc->sc_map;
811 
812 	sc->num_msivec = 32;
813 	if (pci_intr_enable_msivec(pa, sc->num_msivec) != 0) {
814 		sc->num_msivec = 1;
815 		if (pci_intr_map_msi(pa, &ih) != 0) {
816 			printf(": can't map interrupt\n");
817 			return;
818 		}
819 		clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags);
820 	} else {
821 		if (pci_intr_map_msivec(pa, 0, &ih) != 0 &&
822 		    pci_intr_map_msi(pa, &ih) != 0) {
823 			printf(": can't map interrupt\n");
824 			return;
825 		}
826 		set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags);
827 		psc->mhi_irq[MHI_ER_CTRL] = 1;
828 		psc->mhi_irq[MHI_ER_DATA] = 2;
829 	}
830 
831 	intrstr = pci_intr_string(psc->sc_pc, ih);
832 	snprintf(psc->sc_ivname[0], sizeof(psc->sc_ivname[0]), "%s:bhi",
833 	    sc->sc_dev.dv_xname);
834 	psc->sc_ih[0] = pci_intr_establish(psc->sc_pc, ih, IPL_NET,
835 	    qwx_pci_intr, psc, psc->sc_ivname[0]);
836 	if (psc->sc_ih[0] == NULL) {
837 		printf(": can't establish interrupt");
838 		if (intrstr != NULL)
839 			printf(" at %s", intrstr);
840 		printf("\n");
841 		return;
842 	}
843 	printf(": %s\n", intrstr);
844 
845 	if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) {
846 		int msivec;
847 
848 		msivec = psc->mhi_irq[MHI_ER_CTRL];
849 		if (pci_intr_map_msivec(pa, msivec, &ih) != 0 &&
850 		    pci_intr_map_msi(pa, &ih) != 0) {
851 			printf(": can't map interrupt\n");
852 			return;
853 		}
854 		snprintf(psc->sc_ivname[msivec],
855 		    sizeof(psc->sc_ivname[msivec]),
856 		    "%s:mhic", sc->sc_dev.dv_xname);
857 		psc->sc_ih[msivec] = pci_intr_establish(psc->sc_pc, ih,
858 		    IPL_NET, qwx_pci_intr_mhi_ctrl, psc,
859 		    psc->sc_ivname[msivec]);
860 		if (psc->sc_ih[msivec] == NULL) {
861 			printf("%s: can't establish interrupt\n",
862 			    sc->sc_dev.dv_xname);
863 			return;
864 		}
865 
866 		msivec = psc->mhi_irq[MHI_ER_DATA];
867 		if (pci_intr_map_msivec(pa, msivec, &ih) != 0 &&
868 		    pci_intr_map_msi(pa, &ih) != 0) {
869 			printf(": can't map interrupt\n");
870 			return;
871 		}
872 		snprintf(psc->sc_ivname[msivec],
873 		    sizeof(psc->sc_ivname[msivec]),
874 		    "%s:mhid", sc->sc_dev.dv_xname);
875 		psc->sc_ih[msivec] = pci_intr_establish(psc->sc_pc, ih,
876 		    IPL_NET, qwx_pci_intr_mhi_data, psc,
877 		    psc->sc_ivname[msivec]);
878 		if (psc->sc_ih[msivec] == NULL) {
879 			printf("%s: can't establish interrupt\n",
880 			    sc->sc_dev.dv_xname);
881 			return;
882 		}
883 	}
884 
885 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
886 
887 	switch (PCI_PRODUCT(pa->pa_id)) {
888 	case PCI_PRODUCT_QUALCOMM_QCA6390:
889 		qwx_pci_read_hw_version(sc, &soc_hw_version_major,
890 		    &soc_hw_version_minor);
891 		switch (soc_hw_version_major) {
892 		case 2:
893 			sc->sc_hw_rev = ATH11K_HW_QCA6390_HW20;
894 			break;
895 		default:
896 			printf(": unsupported QCA6390 SOC version: %d %d\n",
897 				soc_hw_version_major, soc_hw_version_minor);
898 			return;
899 		}
900 
901 		pci_ops = &qwx_pci_ops_qca6390;
902 		psc->max_chan = QWX_MHI_CONFIG_QCA6390_MAX_CHANNELS;
903 		break;
904 	case PCI_PRODUCT_QUALCOMM_QCN9074:
905 		pci_ops = &qwx_pci_ops_qcn9074;
906 		sc->sc_hw_rev = ATH11K_HW_QCN9074_HW10;
907 		psc->max_chan = QWX_MHI_CONFIG_QCA9074_MAX_CHANNELS;
908 		break;
909 	case PCI_PRODUCT_QUALCOMM_QCNFA765:
910 		sc->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD;
911 		qwx_pci_read_hw_version(sc, &soc_hw_version_major,
912 		    &soc_hw_version_minor);
913 		switch (soc_hw_version_major) {
914 		case 2:
915 			switch (soc_hw_version_minor) {
916 			case 0x00:
917 			case 0x01:
918 				sc->sc_hw_rev = ATH11K_HW_WCN6855_HW20;
919 				break;
920 			case 0x10:
921 			case 0x11:
922 				sc->sc_hw_rev = ATH11K_HW_WCN6855_HW21;
923 				break;
924 			default:
925 				goto unsupported_wcn6855_soc;
926 			}
927 			break;
928 		default:
929 unsupported_wcn6855_soc:
930 			printf(": unsupported WCN6855 SOC version: %d %d\n",
931 				soc_hw_version_major, soc_hw_version_minor);
932 			return;
933 		}
934 
935 		pci_ops = &qwx_pci_ops_qca6390;
936 		psc->max_chan = QWX_MHI_CONFIG_QCA6390_MAX_CHANNELS;
937 		break;
938 	default:
939 		printf(": unsupported chip\n");
940 		return;
941 	}
942 
943 	/* register PCI ops */
944 	psc->sc_pci_ops = pci_ops;
945 
946 	error = qwx_pcic_init_msi_config(sc);
947 	if (error)
948 		goto err_pci_free_region;
949 
950 	error = qwx_pci_alloc_msi(sc);
951 	if (error) {
952 		printf("%s: failed to enable msi: %d\n", sc->sc_dev.dv_xname,
953 		    error);
954 		goto err_pci_free_region;
955 	}
956 
957 	error = qwx_init_hw_params(sc);
958 	if (error)
959 		goto err_pci_disable_msi;
960 
961 	psc->chan_ctxt = qwx_dmamem_alloc(sc->sc_dmat,
962 	    sizeof(struct qwx_mhi_chan_ctxt) * psc->max_chan, 0);
963 	if (psc->chan_ctxt == NULL) {
964 		printf("%s: could not allocate channel context array\n",
965 		    sc->sc_dev.dv_xname);
966 		goto err_pci_disable_msi;
967 	}
968 
969 	if (psc->sc_pci_ops->alloc_xfer_rings(psc)) {
970 		printf("%s: could not allocate transfer rings\n",
971 		    sc->sc_dev.dv_xname);
972 		goto err_pci_free_chan_ctxt;
973 	}
974 
975 	psc->event_ctxt = qwx_dmamem_alloc(sc->sc_dmat,
976 	    sizeof(struct qwx_mhi_event_ctxt) * QWX_NUM_EVENT_CTX, 0);
977 	if (psc->event_ctxt == NULL) {
978 		printf("%s: could not allocate event context array\n",
979 		    sc->sc_dev.dv_xname);
980 		goto err_pci_free_xfer_rings;
981 	}
982 
983 	if (qwx_pci_alloc_event_rings(psc)) {
984 		printf("%s: could not allocate event rings\n",
985 		    sc->sc_dev.dv_xname);
986 		goto err_pci_free_event_ctxt;
987 	}
988 
989 	psc->cmd_ctxt = qwx_dmamem_alloc(sc->sc_dmat,
990 	    sizeof(struct qwx_mhi_cmd_ctxt), 0);
991 	if (psc->cmd_ctxt == NULL) {
992 		printf("%s: could not allocate command context array\n",
993 		    sc->sc_dev.dv_xname);
994 		goto err_pci_free_event_rings;
995 	}
996 
997 	if (qwx_pci_init_cmd_ring(sc, &psc->cmd_ring))  {
998 		printf("%s: could not allocate command ring\n",
999 		    sc->sc_dev.dv_xname);
1000 		goto err_pci_free_cmd_ctxt;
1001 	}
1002 
1003 	error = qwx_mhi_register(sc);
1004 	if (error) {
1005 		printf(": failed to register mhi: %d\n", error);
1006 		goto err_pci_free_cmd_ring;
1007 	}
1008 
1009 	error = qwx_hal_srng_init(sc);
1010 	if (error)
1011 		goto err_mhi_unregister;
1012 
1013 	error = qwx_ce_alloc_pipes(sc);
1014 	if (error) {
1015 		printf(": failed to allocate ce pipes: %d\n", error);
1016 		goto err_hal_srng_deinit;
1017 	}
1018 
1019 	sc->sc_nswq = taskq_create("qwxns", 1, IPL_NET, 0);
1020 	if (sc->sc_nswq == NULL)
1021 		goto err_ce_free;
1022 
1023 	qwx_pci_init_qmi_ce_config(sc);
1024 
1025 	error = qwx_pcic_config_irq(sc, pa);
1026 	if (error) {
1027 		printf("%s: failed to config irq: %d\n",
1028 		    sc->sc_dev.dv_xname, error);
1029 		goto err_ce_free;
1030 	}
1031 #if notyet
1032 	ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
1033 	if (ret) {
1034 		ath11k_err(ab, "failed to set irq affinity %d\n", ret);
1035 		goto err_free_irq;
1036 	}
1037 
1038 	/* kernel may allocate a dummy vector before request_irq and
1039 	 * then allocate a real vector when request_irq is called.
1040 	 * So get msi_data here again to avoid spurious interrupt
1041 	 * as msi_data will configured to srngs.
1042 	 */
1043 	ret = ath11k_pci_config_msi_data(ab_pci);
1044 	if (ret) {
1045 		ath11k_err(ab, "failed to config msi_data: %d\n", ret);
1046 		goto err_irq_affinity_cleanup;
1047 	}
1048 #endif
1049 #ifdef QWX_DEBUG
1050 	task_set(&psc->rddm_task, qwx_rddm_task, psc);
1051 #endif
1052 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
1053 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
1054 	ic->ic_state = IEEE80211_S_INIT;
1055 
1056 	/* Set device capabilities. */
1057 	ic->ic_caps =
1058 #if 0
1059 	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
1060 #endif
1061 	    IEEE80211_C_ADDBA_OFFLOAD | /* device sends ADDBA/DELBA frames */
1062 	    IEEE80211_C_WEP |		/* WEP */
1063 	    IEEE80211_C_RSN |		/* WPA/RSN */
1064 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
1065 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
1066 #if 0
1067 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
1068 #endif
1069 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
1070 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
1071 
1072 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
1073 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
1074 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
1075 
1076 	/* IBSS channel undefined for now. */
1077 	ic->ic_ibss_chan = &ic->ic_channels[1];
1078 
1079 	ifp->if_softc = sc;
1080 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1081 	ifp->if_ioctl = qwx_ioctl;
1082 	ifp->if_start = qwx_start;
1083 	ifp->if_watchdog = qwx_watchdog;
1084 	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
1085 	if_attach(ifp);
1086 	ieee80211_ifattach(ifp);
1087 	ieee80211_media_init(ifp, qwx_media_change, ieee80211_media_status);
1088 
1089 	ic->ic_node_alloc = qwx_node_alloc;
1090 
1091 	/* Override 802.11 state transition machine. */
1092 	sc->sc_newstate = ic->ic_newstate;
1093 	ic->ic_newstate = qwx_newstate;
1094 	ic->ic_set_key = qwx_set_key;
1095 	ic->ic_delete_key = qwx_delete_key;
1096 #if 0
1097 	ic->ic_updatechan = qwx_updatechan;
1098 	ic->ic_updateprot = qwx_updateprot;
1099 	ic->ic_updateslot = qwx_updateslot;
1100 	ic->ic_updateedca = qwx_updateedca;
1101 	ic->ic_updatedtim = qwx_updatedtim;
1102 #endif
1103 	/*
1104 	 * We cannot read the MAC address without loading the
1105 	 * firmware from disk. Postpone until mountroot is done.
1106 	 */
1107 	config_mountroot(self, qwx_pci_attach_hook);
1108 	return;
1109 
1110 err_ce_free:
1111 	qwx_ce_free_pipes(sc);
1112 err_hal_srng_deinit:
1113 err_mhi_unregister:
1114 err_pci_free_cmd_ring:
1115 	qwx_pci_free_cmd_ring(psc);
1116 err_pci_free_cmd_ctxt:
1117 	qwx_dmamem_free(sc->sc_dmat, psc->cmd_ctxt);
1118 	psc->cmd_ctxt = NULL;
1119 err_pci_free_event_rings:
1120 	qwx_pci_free_event_rings(psc);
1121 err_pci_free_event_ctxt:
1122 	qwx_dmamem_free(sc->sc_dmat, psc->event_ctxt);
1123 	psc->event_ctxt = NULL;
1124 err_pci_free_xfer_rings:
1125 	qwx_pci_free_xfer_rings(psc);
1126 err_pci_free_chan_ctxt:
1127 	qwx_dmamem_free(sc->sc_dmat, psc->chan_ctxt);
1128 	psc->chan_ctxt = NULL;
1129 err_pci_disable_msi:
1130 err_pci_free_region:
1131 	pci_intr_disestablish(psc->sc_pc, psc->sc_ih[0]);
1132 	return;
1133 }
1134 
1135 int
1136 qwx_pci_detach(struct device *self, int flags)
1137 {
1138 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)self;
1139 	struct qwx_softc *sc = &psc->sc_sc;
1140 
1141 	if (psc->sc_ih[0]) {
1142 		pci_intr_disestablish(psc->sc_pc, psc->sc_ih[0]);
1143 		psc->sc_ih[0] = NULL;
1144 	}
1145 
1146 	qwx_detach(sc);
1147 
1148 	qwx_pci_free_event_rings(psc);
1149 	qwx_pci_free_xfer_rings(psc);
1150 	qwx_pci_free_cmd_ring(psc);
1151 
1152 	if (psc->event_ctxt) {
1153 		qwx_dmamem_free(sc->sc_dmat, psc->event_ctxt);
1154 		psc->event_ctxt = NULL;
1155 	}
1156 	if (psc->chan_ctxt) {
1157 		qwx_dmamem_free(sc->sc_dmat, psc->chan_ctxt);
1158 		psc->chan_ctxt = NULL;
1159 	}
1160 	if (psc->cmd_ctxt) {
1161 		qwx_dmamem_free(sc->sc_dmat, psc->cmd_ctxt);
1162 		psc->cmd_ctxt = NULL;
1163 	}
1164 
1165 	if (psc->amss_data) {
1166 		qwx_dmamem_free(sc->sc_dmat, psc->amss_data);
1167 		psc->amss_data = NULL;
1168 	}
1169 	if (psc->amss_vec) {
1170 		qwx_dmamem_free(sc->sc_dmat, psc->amss_vec);
1171 		psc->amss_vec = NULL;
1172 	}
1173 
1174 	return 0;
1175 }
1176 
1177 void
1178 qwx_pci_attach_hook(struct device *self)
1179 {
1180 	struct qwx_softc *sc = (void *)self;
1181 	int s = splnet();
1182 
1183 	qwx_attach(sc);
1184 
1185 	splx(s);
1186 }
1187 
1188 void
1189 qwx_pci_free_xfer_rings(struct qwx_pci_softc *psc)
1190 {
1191 	struct qwx_softc *sc = &psc->sc_sc;
1192 	int i;
1193 
1194 	for (i = 0; i < nitems(psc->xfer_rings); i++) {
1195 		struct qwx_pci_xfer_ring *ring = &psc->xfer_rings[i];
1196 		if (ring->dmamem) {
1197 			qwx_dmamem_free(sc->sc_dmat, ring->dmamem);
1198 			ring->dmamem = NULL;
1199 		}
1200 		memset(ring, 0, sizeof(*ring));
1201 	}
1202 }
1203 
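/*
 * Allocate one MHI transfer ring.  The ring elements are placed in DMA
 * memory aligned to the ring size and, for inbound (device to host)
 * channels, an mbuf cluster is pre-loaded for every element so the device
 * can fill it as soon as the channel is started.
 */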
1204 int
1205 qwx_pci_alloc_xfer_ring(struct qwx_softc *sc, struct qwx_pci_xfer_ring *ring,
1206     uint32_t id, uint32_t direction, uint32_t event_ring_index,
1207     size_t num_elements)
1208 {
1209 	bus_size_t size;
1210 	int i, err;
1211 
1212 	memset(ring, 0, sizeof(*ring));
1213 
1214 	size = sizeof(struct qwx_mhi_ring_element) * num_elements;
1215 	/* Hardware requires that rings are aligned to ring size. */
1216 	ring->dmamem = qwx_dmamem_alloc(sc->sc_dmat, size, size);
1217 	if (ring->dmamem == NULL)
1218 		return ENOMEM;
1219 
1220 	ring->size = size;
1221 	ring->mhi_chan_id = id;
1222 	ring->mhi_chan_state = MHI_CH_STATE_DISABLED;
1223 	ring->mhi_chan_direction = direction;
1224 	ring->mhi_chan_event_ring_index = event_ring_index;
1225 	ring->num_elements = num_elements;
1226 
1227 	memset(ring->data, 0, sizeof(ring->data));
1228 	for (i = 0; i < ring->num_elements; i++) {
1229 		struct qwx_xfer_data *xfer = &ring->data[i];
1230 
1231 		err = bus_dmamap_create(sc->sc_dmat, QWX_PCI_XFER_MAX_DATA_SIZE,
1232 		    1, QWX_PCI_XFER_MAX_DATA_SIZE, 0, BUS_DMA_NOWAIT,
1233 		    &xfer->map);
1234 		if (err) {
1235 			printf("%s: could not create xfer DMA map\n",
1236 			    sc->sc_dev.dv_xname);
1237 			goto fail;
1238 		}
1239 
1240 		if (direction == MHI_CHAN_TYPE_INBOUND) {
1241 			struct mbuf *m;
1242 
1243 			m = m_gethdr(M_DONTWAIT, MT_DATA);
1244 			if (m == NULL) {
1245 				err = ENOBUFS;
1246 				goto fail;
1247 			}
1248 
1249 			MCLGETL(m, M_DONTWAIT, QWX_PCI_XFER_MAX_DATA_SIZE);
1250 			if ((m->m_flags & M_EXT) == 0) {
1251 				m_freem(m);
1252 				err = ENOBUFS;
1253 				goto fail;
1254 			}
1255 
1256 			m->m_len = m->m_pkthdr.len = QWX_PCI_XFER_MAX_DATA_SIZE;
1257 			err = bus_dmamap_load_mbuf(sc->sc_dmat, xfer->map,
1258 			    m, BUS_DMA_READ | BUS_DMA_NOWAIT);
1259 			if (err) {
1260 				printf("%s: can't map mbuf (error %d)\n",
1261 				    sc->sc_dev.dv_xname, err);
1262 				m_freem(m);
1263 				goto fail;
1264 			}
1265 
1266 			bus_dmamap_sync(sc->sc_dmat, xfer->map, 0,
1267 			    QWX_PCI_XFER_MAX_DATA_SIZE, BUS_DMASYNC_PREREAD);
1268 			xfer->m = m;
1269 		}
1270 	}
1271 
1272 	return 0;
1273 fail:
1274 	for (i = 0; i < ring->num_elements; i++) {
1275 		struct qwx_xfer_data *xfer = &ring->data[i];
1276 
1277 		if (xfer->map) {
1278 			bus_dmamap_sync(sc->sc_dmat, xfer->map, 0,
1279 			    xfer->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1280 			bus_dmamap_unload(sc->sc_dmat, xfer->map);
1281 			bus_dmamap_destroy(sc->sc_dmat, xfer->map);
1282 			xfer->map = NULL;
1283 		}
1284 
1285 		if (xfer->m) {
1286 			m_freem(xfer->m);
1287 			xfer->m = NULL;
1288 		}
1289 	}
1290 	return 1;
1291 }
1292 
1293 int
1294 qwx_pci_alloc_xfer_rings_qca6390(struct qwx_pci_softc *psc)
1295 {
1296 	struct qwx_softc *sc = &psc->sc_sc;
1297 	int ret;
1298 
1299 	ret = qwx_pci_alloc_xfer_ring(sc,
1300 	    &psc->xfer_rings[QWX_PCI_XFER_RING_LOOPBACK_OUTBOUND],
1301 	    0, MHI_CHAN_TYPE_OUTBOUND, 0, 32);
1302 	if (ret)
1303 		goto fail;
1304 
1305 	ret = qwx_pci_alloc_xfer_ring(sc,
1306 	    &psc->xfer_rings[QWX_PCI_XFER_RING_LOOPBACK_INBOUND],
1307 	    1, MHI_CHAN_TYPE_INBOUND, 0, 32);
1308 	if (ret)
1309 		goto fail;
1310 
1311 	ret = qwx_pci_alloc_xfer_ring(sc,
1312 	    &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_OUTBOUND],
1313 	    20, MHI_CHAN_TYPE_OUTBOUND, 1, 64);
1314 	if (ret)
1315 		goto fail;
1316 
1317 	ret = qwx_pci_alloc_xfer_ring(sc,
1318 	    &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_INBOUND],
1319 	    21, MHI_CHAN_TYPE_INBOUND, 1, 64);
1320 	if (ret)
1321 		goto fail;
1322 
1323 	return 0;
1324 fail:
1325 	qwx_pci_free_xfer_rings(psc);
1326 	return ret;
1327 }
1328 
1329 int
1330 qwx_pci_alloc_xfer_rings_qcn9074(struct qwx_pci_softc *psc)
1331 {
1332 	struct qwx_softc *sc = &psc->sc_sc;
1333 	int ret;
1334 
1335 	ret = qwx_pci_alloc_xfer_ring(sc,
1336 	    &psc->xfer_rings[QWX_PCI_XFER_RING_LOOPBACK_OUTBOUND],
1337 	    0, MHI_CHAN_TYPE_OUTBOUND, 1, 32);
1338 	if (ret)
1339 		goto fail;
1340 
1341 	ret = qwx_pci_alloc_xfer_ring(sc,
1342 	    &psc->xfer_rings[QWX_PCI_XFER_RING_LOOPBACK_INBOUND],
1343 	    1, MHI_CHAN_TYPE_INBOUND, 1, 32);
1344 	if (ret)
1345 		goto fail;
1346 
1347 	ret = qwx_pci_alloc_xfer_ring(sc,
1348 	    &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_OUTBOUND],
1349 	    20, MHI_CHAN_TYPE_OUTBOUND, 1, 32);
1350 	if (ret)
1351 		goto fail;
1352 
1353 	ret = qwx_pci_alloc_xfer_ring(sc,
1354 	    &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_INBOUND],
1355 	    21, MHI_CHAN_TYPE_INBOUND, 1, 32);
1356 	if (ret)
1357 		goto fail;
1358 
1359 	return 0;
1360 fail:
1361 	qwx_pci_free_xfer_rings(psc);
1362 	return ret;
1363 }
1364 
1365 void
1366 qwx_pci_free_event_rings(struct qwx_pci_softc *psc)
1367 {
1368 	struct qwx_softc *sc = &psc->sc_sc;
1369 	int i;
1370 
1371 	for (i = 0; i < nitems(psc->event_rings); i++) {
1372 		struct qwx_pci_event_ring *ring = &psc->event_rings[i];
1373 		if (ring->dmamem) {
1374 			qwx_dmamem_free(sc->sc_dmat, ring->dmamem);
1375 			ring->dmamem = NULL;
1376 		}
1377 		memset(ring, 0, sizeof(*ring));
1378 	}
1379 }
1380 
1381 int
1382 qwx_pci_alloc_event_ring(struct qwx_softc *sc, struct qwx_pci_event_ring *ring,
1383     uint32_t type, uint32_t irq, uint32_t intmod, size_t num_elements)
1384 {
1385 	bus_size_t size;
1386 
1387 	memset(ring, 0, sizeof(*ring));
1388 
1389 	size = sizeof(struct qwx_mhi_ring_element) * num_elements;
1390 	/* Hardware requires that rings are aligned to ring size. */
1391 	ring->dmamem = qwx_dmamem_alloc(sc->sc_dmat, size, size);
1392 	if (ring->dmamem == NULL)
1393 		return ENOMEM;
1394 
1395 	ring->size = size;
1396 	ring->mhi_er_type = type;
1397 	ring->mhi_er_irq = irq;
1398 	ring->mhi_er_irq_moderation_ms = intmod;
1399 	ring->num_elements = num_elements;
1400 	return 0;
1401 }
1402 
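/*
 * Allocate the two MHI event rings: ring 0 (MHI_ER_CTRL) carries control
 * events such as command completions and state changes, ring 1
 * (MHI_ER_DATA) carries transfer completions with a 1 ms interrupt
 * moderation setting.  With multiple MSI vectors each ring gets its own
 * vector (see mhi_irq[] in qwx_pci_attach()).
 */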
1403 int
1404 qwx_pci_alloc_event_rings(struct qwx_pci_softc *psc)
1405 {
1406 	struct qwx_softc *sc = &psc->sc_sc;
1407 	int ret;
1408 
1409 	ret = qwx_pci_alloc_event_ring(sc, &psc->event_rings[0],
1410 	    MHI_ER_CTRL, psc->mhi_irq[MHI_ER_CTRL], 0, 32);
1411 	if (ret)
1412 		goto fail;
1413 
1414 	ret = qwx_pci_alloc_event_ring(sc, &psc->event_rings[1],
1415 	    MHI_ER_DATA, psc->mhi_irq[MHI_ER_DATA], 1, 256);
1416 	if (ret)
1417 		goto fail;
1418 
1419 	return 0;
1420 fail:
1421 	qwx_pci_free_event_rings(psc);
1422 	return ret;
1423 }
1424 
1425 void
1426 qwx_pci_free_cmd_ring(struct qwx_pci_softc *psc)
1427 {
1428 	struct qwx_softc *sc = &psc->sc_sc;
1429 	struct qwx_pci_cmd_ring *ring = &psc->cmd_ring;
1430 
1431 	if (ring->dmamem)
1432 		qwx_dmamem_free(sc->sc_dmat, ring->dmamem);
1433 
1434 	memset(ring, 0, sizeof(*ring));
1435 }
1436 
1437 int
1438 qwx_pci_init_cmd_ring(struct qwx_softc *sc, struct qwx_pci_cmd_ring *ring)
1439 {
1440 	memset(ring, 0, sizeof(*ring));
1441 
1442 	ring->num_elements = QWX_PCI_CMD_RING_MAX_ELEMENTS;
1443 	ring->size = sizeof(struct qwx_mhi_ring_element) * ring->num_elements;
1444 
1445 	/* Hardware requires that rings are aligned to ring size. */
1446 	ring->dmamem = qwx_dmamem_alloc(sc->sc_dmat, ring->size, ring->size);
1447 	if (ring->dmamem == NULL)
1448 		return ENOMEM;
1449 
1450 	return 0;
1451 }
1452 
1453 uint32_t
1454 qwx_pci_read(struct qwx_softc *sc, uint32_t addr)
1455 {
1456 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
1457 
1458 	return (bus_space_read_4(psc->sc_st, psc->sc_sh, addr));
1459 }
1460 
1461 void
1462 qwx_pci_write(struct qwx_softc *sc, uint32_t addr, uint32_t val)
1463 {
1464 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
1465 
1466 	bus_space_write_4(psc->sc_st, psc->sc_sh, addr, val);
1467 }
1468 
1469 void
1470 qwx_pci_read_hw_version(struct qwx_softc *sc, uint32_t *major,
1471     uint32_t *minor)
1472 {
1473 	uint32_t soc_hw_version;
1474 
1475 	soc_hw_version = qwx_pcic_read32(sc, TCSR_SOC_HW_VERSION);
1476 	*major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK, soc_hw_version);
1477 	*minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK, soc_hw_version);
1478 	DPRINTF("%s: pci tcsr_soc_hw_version major %d minor %d\n",
1479 	    sc->sc_dev.dv_xname, *major, *minor);
1480 }
1481 
1482 uint32_t
1483 qwx_pcic_read32(struct qwx_softc *sc, uint32_t offset)
1484 {
1485 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
1486 	int ret = 0;
1487 	uint32_t val;
1488 	bool wakeup_required;
1489 
1490 	/* For offsets beyond BAR + 4K - 32 we may need to wake up
1491 	 * the device before accessing its registers.
1492 	 */
1493 	wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, sc->sc_flags)
1494 	    && offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
1495 	if (wakeup_required && psc->sc_pci_ops->wakeup)
1496 		ret = psc->sc_pci_ops->wakeup(sc);
1497 
1498 	if (offset < ATH11K_PCI_WINDOW_START)
1499 		val = qwx_pci_read(sc, offset);
1500 	else
1501 		val = psc->sc_pci_ops->window_read32(sc, offset);
1502 
1503 	if (wakeup_required && !ret && psc->sc_pci_ops->release)
1504 		psc->sc_pci_ops->release(sc);
1505 
1506 	return val;
1507 }
1508 
1509 void
1510 qwx_pcic_write32(struct qwx_softc *sc, uint32_t offset, uint32_t value)
1511 {
1512 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
1513 	int ret = 0;
1514 	bool wakeup_required;
1515 
1516 	/* For offsets beyond BAR + 4K - 32 we may need to wake up
1517 	 * the device before accessing its registers.
1518 	 */
1519 	wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, sc->sc_flags)
1520 	    && offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
1521 	if (wakeup_required && psc->sc_pci_ops->wakeup)
1522 		ret = psc->sc_pci_ops->wakeup(sc);
1523 
1524 	if (offset < ATH11K_PCI_WINDOW_START)
1525 		qwx_pci_write(sc, offset, value);
1526 	else
1527 		psc->sc_pci_ops->window_write32(sc, offset, value);
1528 
1529 	if (wakeup_required && !ret && psc->sc_pci_ops->release)
1530 		psc->sc_pci_ops->release(sc);
1531 }
1532 
1533 void
1534 qwx_pcic_ext_irq_disable(struct qwx_softc *sc)
1535 {
1536 	clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, sc->sc_flags);
1537 
1538 	/* In case of one MSI vector, we handle irq enable/disable in a
1539 	 * uniform way since we only have one irq
1540 	 */
1541 	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
1542 		return;
1543 
1544 	DPRINTF("%s not implemented\n", __func__);
1545 }
1546 
1547 void
1548 qwx_pcic_ext_irq_enable(struct qwx_softc *sc)
1549 {
1550 	set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, sc->sc_flags);
1551 
1552 	/* In case of one MSI vector, we handle irq enable/disable in a
1553 	 * uniform way since we only have one irq
1554 	 */
1555 	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
1556 		return;
1557 
1558 	DPRINTF("%s not implemented\n", __func__);
1559 }
1560 
1561 void
1562 qwx_pcic_ce_irq_enable(struct qwx_softc *sc, uint16_t ce_id)
1563 {
1564 	/* In case of one MSI vector, we handle irq enable/disable in a
1565 	 * uniform way since we only have one irq
1566 	 */
1567 	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
1568 		return;
1569 
1570 	/* OpenBSD PCI stack does not yet implement MSI interrupt masking. */
1571 	sc->msi_ce_irqmask |= (1U << ce_id);
1572 }
1573 
1574 void
1575 qwx_pcic_ce_irq_disable(struct qwx_softc *sc, uint16_t ce_id)
1576 {
1577 	/* In case of one MSI vector, we handle irq enable/disable in a
1578 	 * uniform way since we only have one irq
1579 	 */
1580 	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
1581 		return;
1582 
1583 	/* OpenBSD PCI stack does not yet implement MSI interrupt masking. */
1584 	sc->msi_ce_irqmask &= ~(1U << ce_id);
1585 }
1586 
1587 void
1588 qwx_pcic_ext_grp_disable(struct qwx_ext_irq_grp *irq_grp)
1589 {
1590 	struct qwx_softc *sc = irq_grp->sc;
1591 
1592 	/* In case of one MSI vector, we handle irq enable/disable
1593 	 * in a uniform way since we only have one irq
1594 	 */
1595 	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
1596 		return;
1597 }
1598 
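/*
 * Set up the "ext" (DP) interrupt groups.  Each group that has any ring
 * enabled in the hw_params ring masks gets one MSI vector, starting at
 * ATH11K_PCI_IRQ_DP_OFFSET.  With a single MSI vector this is a no-op.
 */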
1599 int
1600 qwx_pcic_ext_irq_config(struct qwx_softc *sc, struct pci_attach_args *pa)
1601 {
1602 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
1603 	int i, ret, num_vectors = 0;
1604 	uint32_t msi_data_start = 0;
1605 	uint32_t base_vector = 0;
1606 
1607 	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
1608 		return 0;
1609 
1610 	ret = qwx_pcic_get_user_msi_vector(sc, "DP", &num_vectors,
1611 	    &msi_data_start, &base_vector);
1612 	if (ret < 0)
1613 		return ret;
1614 
1615 	for (i = 0; i < nitems(sc->ext_irq_grp); i++) {
1616 		struct qwx_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
1617 		uint32_t num_irq = 0;
1618 
1619 		irq_grp->sc = sc;
1620 		irq_grp->grp_id = i;
1621 #if 0
1622 		init_dummy_netdev(&irq_grp->napi_ndev);
1623 		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
1624 			       ath11k_pcic_ext_grp_napi_poll);
1625 #endif
1626 		if (sc->hw_params.ring_mask->tx[i] ||
1627 		    sc->hw_params.ring_mask->rx[i] ||
1628 		    sc->hw_params.ring_mask->rx_err[i] ||
1629 		    sc->hw_params.ring_mask->rx_wbm_rel[i] ||
1630 		    sc->hw_params.ring_mask->reo_status[i] ||
1631 		    sc->hw_params.ring_mask->rxdma2host[i] ||
1632 		    sc->hw_params.ring_mask->host2rxdma[i] ||
1633 		    sc->hw_params.ring_mask->rx_mon_status[i]) {
1634 			num_irq = 1;
1635 		}
1636 
1637 		irq_grp->num_irq = num_irq;
1638 		irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
1639 
1640 		if (num_irq) {
1641 			int irq_idx = irq_grp->irqs[0];
1642 			pci_intr_handle_t ih;
1643 
1644 			if (pci_intr_map_msivec(pa, irq_idx, &ih) != 0 &&
1645 			    pci_intr_map(pa, &ih) != 0) {
1646 				printf("%s: can't map interrupt\n",
1647 				    sc->sc_dev.dv_xname);
1648 				return EIO;
1649 			}
1650 
1651 			snprintf(psc->sc_ivname[irq_idx], sizeof(psc->sc_ivname[0]),
1652 			    "%s:ex%d", sc->sc_dev.dv_xname, i);
1653 			psc->sc_ih[irq_idx] = pci_intr_establish(psc->sc_pc, ih,
1654 			    IPL_NET, qwx_ext_intr, irq_grp, psc->sc_ivname[irq_idx]);
1655 			if (psc->sc_ih[irq_idx] == NULL) {
1656 				printf("%s: failed to request irq %d\n",
1657 				    sc->sc_dev.dv_xname, irq_idx);
1658 				return EIO;
1659 			}
1660 		}
1661 
1662 		qwx_pcic_ext_grp_disable(irq_grp);
1663 	}
1664 
1665 	return 0;
1666 }
1667 
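/*
 * Establish per-pipe copy engine interrupt handlers at MSI vectors
 * ATH11K_PCI_IRQ_CE0_OFFSET + CE pipe index, then configure the DP
 * interrupt groups.  With a single MSI vector this function does nothing.
 */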
1668 int
1669 qwx_pcic_config_irq(struct qwx_softc *sc, struct pci_attach_args *pa)
1670 {
1671 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
1672 	struct qwx_ce_pipe *ce_pipe;
1673 	uint32_t msi_data_start;
1674 	uint32_t msi_data_count, msi_data_idx;
1675 	uint32_t msi_irq_start;
1676 	int i, ret, irq_idx;
1677 	pci_intr_handle_t ih;
1678 
1679 	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
1680 		return 0;
1681 
1682 	ret = qwx_pcic_get_user_msi_vector(sc, "CE", &msi_data_count,
1683 	    &msi_data_start, &msi_irq_start);
1684 	if (ret)
1685 		return ret;
1686 
1687 	/* Configure CE irqs */
1688 	for (i = 0, msi_data_idx = 0; i < sc->hw_params.ce_count; i++) {
1689 		if (qwx_ce_get_attr_flags(sc, i) & CE_ATTR_DIS_INTR)
1690 			continue;
1691 
1692 		ce_pipe = &sc->ce.ce_pipe[i];
1693 		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
1694 
1695 		if (pci_intr_map_msivec(pa, irq_idx, &ih) != 0 &&
1696 		    pci_intr_map(pa, &ih) != 0) {
1697 			printf("%s: can't map interrupt\n",
1698 			    sc->sc_dev.dv_xname);
1699 			return EIO;
1700 		}
1701 
1702 		snprintf(psc->sc_ivname[irq_idx], sizeof(psc->sc_ivname[0]),
1703 		    "%s:ce%d", sc->sc_dev.dv_xname, ce_pipe->pipe_num);
1704 		psc->sc_ih[irq_idx] = pci_intr_establish(psc->sc_pc, ih,
1705 		    IPL_NET, qwx_ce_intr, ce_pipe, psc->sc_ivname[irq_idx]);
1706 		if (psc->sc_ih[irq_idx] == NULL) {
1707 			printf("%s: failed to request irq %d\n",
1708 			    sc->sc_dev.dv_xname, irq_idx);
1709 			return EIO;
1710 		}
1711 
1712 		msi_data_idx++;
1713 
1714 		qwx_pcic_ce_irq_disable(sc, i);
1715 	}
1716 
1717 	ret = qwx_pcic_ext_irq_config(sc, pa);
1718 	if (ret)
1719 		return ret;
1720 
1721 	return 0;
1722 }
1723 
1724 void
1725 qwx_pcic_ce_irqs_enable(struct qwx_softc *sc)
1726 {
1727 	int i;
1728 
1729 	set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, sc->sc_flags);
1730 
1731 	for (i = 0; i < sc->hw_params.ce_count; i++) {
1732 		if (qwx_ce_get_attr_flags(sc, i) & CE_ATTR_DIS_INTR)
1733 			continue;
1734 		qwx_pcic_ce_irq_enable(sc, i);
1735 	}
1736 }
1737 
1738 void
1739 qwx_pcic_ce_irqs_disable(struct qwx_softc *sc)
1740 {
1741 	int i;
1742 
1743 	clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, sc->sc_flags);
1744 
1745 	for (i = 0; i < sc->hw_params.ce_count; i++) {
1746 		if (qwx_ce_get_attr_flags(sc, i) & CE_ATTR_DIS_INTR)
1747 			continue;
1748 		qwx_pcic_ce_irq_disable(sc, i);
1749 	}
1750 }
1751 
1752 int
1753 qwx_pci_start(struct qwx_softc *sc)
1754 {
1755 	/* TODO: for now don't restore ASPM in case of single MSI
1756 	 * vector as MHI register reading in M2 causes system hang.
1757 	 */
1758 	if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
1759 		qwx_pci_aspm_restore(sc);
1760 	else
1761 		DPRINTF("%s: leaving PCI ASPM disabled to avoid MHI M2 problems"
1762 		    "\n", sc->sc_dev.dv_xname);
1763 
1764 	set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, sc->sc_flags);
1765 
1766 	qwx_ce_rx_post_buf(sc);
1767 	qwx_pcic_ce_irqs_enable(sc);
1768 
1769 	return 0;
1770 }
1771 
1772 void
1773 qwx_pcic_ce_irq_disable_sync(struct qwx_softc *sc)
1774 {
1775 	qwx_pcic_ce_irqs_disable(sc);
1776 #if 0
1777 	ath11k_pcic_sync_ce_irqs(ab);
1778 	ath11k_pcic_kill_tasklets(ab);
1779 #endif
1780 }
1781 
1782 void
1783 qwx_pci_stop(struct qwx_softc *sc)
1784 {
1785 	qwx_pcic_ce_irq_disable_sync(sc);
1786 	qwx_ce_cleanup_pipes(sc);
1787 }
1788 
1789 int
1790 qwx_pci_bus_wake_up(struct qwx_softc *sc)
1791 {
1792 	if (qwx_mhi_wake_db_clear_valid(sc))
1793 		qwx_mhi_device_wake(sc);
1794 
1795 	return 0;
1796 }
1797 
1798 void
1799 qwx_pci_bus_release(struct qwx_softc *sc)
1800 {
1801 	if (qwx_mhi_wake_db_clear_valid(sc))
1802 		qwx_mhi_device_zzz(sc);
1803 }
1804 
1805 uint32_t
1806 qwx_pci_get_window_start(struct qwx_softc *sc, uint32_t offset)
1807 {
1808 	if (!sc->hw_params.static_window_map)
1809 		return ATH11K_PCI_WINDOW_START;
1810 
1811 	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
1812 		/* if offset lies within DP register range, use 3rd window */
1813 		return 3 * ATH11K_PCI_WINDOW_START;
1814 	else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc)) <
1815 		 ATH11K_PCI_WINDOW_RANGE_MASK)
1816 		 /* if offset lies within CE register range, use 2nd window */
1817 		return 2 * ATH11K_PCI_WINDOW_START;
1818 	else
1819 		return ATH11K_PCI_WINDOW_START;
1820 }
1821 
1822 void
1823 qwx_pci_select_window(struct qwx_softc *sc, uint32_t offset)
1824 {
1825 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
1826 	uint32_t window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset);
1827 
1828 #if notyet
1829 	lockdep_assert_held(&ab_pci->window_lock);
1830 #endif
1831 
1832 	if (window != psc->register_window) {
1833 		qwx_pci_write(sc, ATH11K_PCI_WINDOW_REG_ADDRESS,
1834 		    ATH11K_PCI_WINDOW_ENABLE_BIT | window);
1835 		(void) qwx_pci_read(sc, ATH11K_PCI_WINDOW_REG_ADDRESS);
1836 		psc->register_window = window;
1837 	}
1838 }
1839 
1840 void
1841 qwx_pci_window_write32(struct qwx_softc *sc, uint32_t offset, uint32_t value)
1842 {
1843 	uint32_t window_start;
1844 
1845 	window_start = qwx_pci_get_window_start(sc, offset);
1846 
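	/*
	 * Accesses through the first window must (re)select the dynamic
	 * window; the statically mapped CE and DP windows are programmed
	 * once at power-up and can be used directly.
	 */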
1847 	if (window_start == ATH11K_PCI_WINDOW_START) {
1848 #if notyet
1849 		spin_lock_bh(&ab_pci->window_lock);
1850 #endif
1851 		qwx_pci_select_window(sc, offset);
1852 		qwx_pci_write(sc, window_start +
1853 		    (offset & ATH11K_PCI_WINDOW_RANGE_MASK), value);
1854 #if notyet
1855 		spin_unlock_bh(&ab_pci->window_lock);
1856 #endif
1857 	} else {
1858 		qwx_pci_write(sc, window_start +
1859 		    (offset & ATH11K_PCI_WINDOW_RANGE_MASK), value);
1860 	}
1861 }
1862 
1863 uint32_t
1864 qwx_pci_window_read32(struct qwx_softc *sc, uint32_t offset)
1865 {
1866 	uint32_t window_start, val;
1867 
1868 	window_start = qwx_pci_get_window_start(sc, offset);
1869 
1870 	if (window_start == ATH11K_PCI_WINDOW_START) {
1871 #if notyet
1872 		spin_lock_bh(&ab_pci->window_lock);
1873 #endif
1874 		qwx_pci_select_window(sc, offset);
1875 		val = qwx_pci_read(sc, window_start +
1876 		    (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
1877 #if notyet
1878 		spin_unlock_bh(&ab_pci->window_lock);
1879 #endif
1880 	} else {
1881 		val = qwx_pci_read(sc, window_start +
1882 		    (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
1883 	}
1884 
1885 	return val;
1886 }
1887 
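/*
 * With a static window map, program the CE and UMAC (DP) windows once:
 * the CE window select goes into bits 6-11 and the UMAC window select
 * into bits 12-17 of the window register, matching the 2nd and 3rd
 * windows used by qwx_pci_get_window_start().
 */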
1888 void
1889 qwx_pci_select_static_window(struct qwx_softc *sc)
1890 {
1891 	uint32_t umac_window;
1892 	uint32_t ce_window;
1893 	uint32_t window;
1894 
1895 	umac_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET);
1896 	ce_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE);
1897 	window = (umac_window << 12) | (ce_window << 6);
1898 
1899 	qwx_pci_write(sc, ATH11K_PCI_WINDOW_REG_ADDRESS,
1900 	    ATH11K_PCI_WINDOW_ENABLE_BIT | window);
1901 }
1902 
1903 void
1904 qwx_pci_soc_global_reset(struct qwx_softc *sc)
1905 {
1906 	uint32_t val, msecs;
1907 
1908 	val = qwx_pcic_read32(sc, PCIE_SOC_GLOBAL_RESET);
1909 
1910 	val |= PCIE_SOC_GLOBAL_RESET_V;
1911 
1912 	qwx_pcic_write32(sc, PCIE_SOC_GLOBAL_RESET, val);
1913 
1914 	/* TODO: exact time to sleep is uncertain */
1915 	msecs = 10;
1916 	DELAY(msecs * 1000);
1917 
1918 	/* Toggle the V bit back, otherwise the chip stays stuck in reset. */
1919 	val &= ~PCIE_SOC_GLOBAL_RESET_V;
1920 
1921 	qwx_pcic_write32(sc, PCIE_SOC_GLOBAL_RESET, val);
1922 
1923 	DELAY(msecs * 1000);
1924 
1925 	val = qwx_pcic_read32(sc, PCIE_SOC_GLOBAL_RESET);
1926 	if (val == 0xffffffff)
1927 		printf("%s: link down error during global reset\n",
1928 		    sc->sc_dev.dv_xname);
1929 }
1930 
1931 void
1932 qwx_pci_clear_dbg_registers(struct qwx_softc *sc)
1933 {
1934 	uint32_t val;
1935 
1936 	/* read cookie */
1937 	val = qwx_pcic_read32(sc, PCIE_Q6_COOKIE_ADDR);
1938 	DPRINTF("%s: cookie:0x%x\n", sc->sc_dev.dv_xname, val);
1939 
1940 	val = qwx_pcic_read32(sc, WLAON_WARM_SW_ENTRY);
1941 	DPRINTF("%s: WLAON_WARM_SW_ENTRY 0x%x\n", sc->sc_dev.dv_xname, val);
1942 
1943 	/* TODO: exact time to sleep is uncertain */
1944 	DELAY(10 * 1000);
1945 
1946 	/* Write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from continuing
1947 	 * the warm boot path and entering a dead loop.
1948 	 */
1949 	qwx_pcic_write32(sc, WLAON_WARM_SW_ENTRY, 0);
1950 	DELAY(10 * 1000);
1951 
1952 	val = qwx_pcic_read32(sc, WLAON_WARM_SW_ENTRY);
1953 	DPRINTF("%s: WLAON_WARM_SW_ENTRY 0x%x\n", sc->sc_dev.dv_xname, val);
1954 
1955 	/* This is a read-to-clear register; read it to clear it and
1956 	 * prevent Q6 from entering the wrong code path.
1957 	 */
1958 	val = qwx_pcic_read32(sc, WLAON_SOC_RESET_CAUSE_REG);
1959 	DPRINTF("%s: soc reset cause:%d\n", sc->sc_dev.dv_xname, val);
1960 }
1961 
1962 int
1963 qwx_pci_set_link_reg(struct qwx_softc *sc, uint32_t offset, uint32_t value,
1964     uint32_t mask)
1965 {
1966 	uint32_t v;
1967 	int i;
1968 
1969 	v = qwx_pcic_read32(sc, offset);
1970 	if ((v & mask) == value)
1971 		return 0;
1972 
1973 	for (i = 0; i < 10; i++) {
1974 		qwx_pcic_write32(sc, offset, (v & ~mask) | value);
1975 
1976 		v = qwx_pcic_read32(sc, offset);
1977 		if ((v & mask) == value)
1978 			return 0;
1979 
1980 		delay((2 * 1000));
1981 	}
1982 
1983 	DPRINTF("failed to set pcie link register 0x%08x: 0x%08x != 0x%08x\n",
1984 	    offset, v & mask, value);
1985 
1986 	return ETIMEDOUT;
1987 }
1988 
1989 int
1990 qwx_pci_fix_l1ss(struct qwx_softc *sc)
1991 {
1992 	int ret;
1993 
1994 	ret = qwx_pci_set_link_reg(sc,
1995 				      PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(sc),
1996 				      PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
1997 				      PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
1998 	if (ret) {
1999 		DPRINTF("failed to set sysclk: %d\n", ret);
2000 		return ret;
2001 	}
2002 
2003 	ret = qwx_pci_set_link_reg(sc,
2004 				      PCIE_PCS_OSC_DTCT_CONFIG1_REG(sc),
2005 				      PCIE_PCS_OSC_DTCT_CONFIG1_VAL,
2006 				      PCIE_PCS_OSC_DTCT_CONFIG_MSK);
2007 	if (ret) {
2008 		DPRINTF("failed to set dtct config1 error: %d\n", ret);
2009 		return ret;
2010 	}
2011 
2012 	ret = qwx_pci_set_link_reg(sc,
2013 				      PCIE_PCS_OSC_DTCT_CONFIG2_REG(sc),
2014 				      PCIE_PCS_OSC_DTCT_CONFIG2_VAL,
2015 				      PCIE_PCS_OSC_DTCT_CONFIG_MSK);
2016 	if (ret) {
2017 		DPRINTF("failed to set dtct config2: %d\n", ret);
2018 		return ret;
2019 	}
2020 
2021 	ret = qwx_pci_set_link_reg(sc,
2022 				      PCIE_PCS_OSC_DTCT_CONFIG4_REG(sc),
2023 				      PCIE_PCS_OSC_DTCT_CONFIG4_VAL,
2024 				      PCIE_PCS_OSC_DTCT_CONFIG_MSK);
2025 	if (ret) {
2026 		DPRINTF("failed to set dtct config4: %d\n", ret);
2027 		return ret;
2028 	}
2029 
2030 	return 0;
2031 }
2032 
2033 void
2034 qwx_pci_enable_ltssm(struct qwx_softc *sc)
2035 {
2036 	uint32_t val;
2037 	int i;
2038 
2039 	val = qwx_pcic_read32(sc, PCIE_PCIE_PARF_LTSSM);
2040 
2041 	/* The PCIe link seems very unstable after the Hot Reset. */
2042 	for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
2043 		if (val == 0xffffffff)
2044 			DELAY(5 * 1000);
2045 
2046 		qwx_pcic_write32(sc, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
2047 		val = qwx_pcic_read32(sc, PCIE_PCIE_PARF_LTSSM);
2048 	}
2049 
2050 	DPRINTF("%s: pci ltssm 0x%x\n", sc->sc_dev.dv_xname, val);
2051 
2052 	val = qwx_pcic_read32(sc, GCC_GCC_PCIE_HOT_RST);
2053 	val |= GCC_GCC_PCIE_HOT_RST_VAL;
2054 	qwx_pcic_write32(sc, GCC_GCC_PCIE_HOT_RST, val);
2055 	val = qwx_pcic_read32(sc, GCC_GCC_PCIE_HOT_RST);
2056 
2057 	DPRINTF("%s: pci pcie_hot_rst 0x%x\n", sc->sc_dev.dv_xname, val);
2058 
2059 	DELAY(5 * 1000);
2060 }
2061 
2062 void
2063 qwx_pci_clear_all_intrs(struct qwx_softc *sc)
2064 {
2065 	/* This is a workaround (WAR) for PCIe Hot Reset.
2066 	 * The target still asserts its interrupt after a Hot Reset, so
2067 	 * when the SBL is downloaded again it enables interrupts,
2068 	 * receives the stale one, and crashes immediately.
2069 	 */
2070 	qwx_pcic_write32(sc, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
2071 }
2072 
2073 void
2074 qwx_pci_set_wlaon_pwr_ctrl(struct qwx_softc *sc)
2075 {
2076 	uint32_t val;
2077 
2078 	val = qwx_pcic_read32(sc, WLAON_QFPROM_PWR_CTRL_REG);
2079 	val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
2080 	qwx_pcic_write32(sc, WLAON_QFPROM_PWR_CTRL_REG, val);
2081 }
2082 
2083 void
2084 qwx_pci_force_wake(struct qwx_softc *sc)
2085 {
2086 	qwx_pcic_write32(sc, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
2087 	DELAY(5 * 1000);
2088 }
2089 
2090 void
2091 qwx_pci_sw_reset(struct qwx_softc *sc, bool power_on)
2092 {
2093 	DELAY(100 * 1000); /* msecs */
2094 
2095 	if (power_on) {
2096 		qwx_pci_enable_ltssm(sc);
2097 		qwx_pci_clear_all_intrs(sc);
2098 		qwx_pci_set_wlaon_pwr_ctrl(sc);
2099 		if (sc->hw_params.fix_l1ss)
2100 			qwx_pci_fix_l1ss(sc);
2101 	}
2102 
2103 	qwx_mhi_clear_vector(sc);
2104 	qwx_pci_clear_dbg_registers(sc);
2105 	qwx_pci_soc_global_reset(sc);
2106 	qwx_mhi_reset_device(sc, 0);
2107 }
2108 
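/*
 * Toggle the MSI Enable bit in the device's MSI Message Control
 * register via PCI config space.
 */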
2109 void
2110 qwx_pci_msi_config(struct qwx_softc *sc, bool enable)
2111 {
2112 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2113 	uint32_t val;
2114 
2115 	val = pci_conf_read(psc->sc_pc, psc->sc_tag,
2116 	    psc->sc_msi_off + PCI_MSI_MC);
2117 
2118 	if (enable)
2119 		val |= PCI_MSI_MC_MSIE;
2120 	else
2121 		val &= ~PCI_MSI_MC_MSIE;
2122 
2123 	pci_conf_write(psc->sc_pc, psc->sc_tag,  psc->sc_msi_off + PCI_MSI_MC,
2124 	    val);
2125 }
2126 
2127 void
2128 qwx_pci_msi_enable(struct qwx_softc *sc)
2129 {
2130 	qwx_pci_msi_config(sc, true);
2131 }
2132 
2133 void
2134 qwx_pci_msi_disable(struct qwx_softc *sc)
2135 {
2136 	qwx_pci_msi_config(sc, false);
2137 }
2138 
2139 void
2140 qwx_pci_aspm_disable(struct qwx_softc *sc)
2141 {
2142 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2143 
2144 	psc->sc_lcsr = pci_conf_read(psc->sc_pc, psc->sc_tag,
2145 	    psc->sc_cap_off + PCI_PCIE_LCSR);
2146 
2147 	DPRINTF("%s: pci link_ctl 0x%04x L0s %d L1 %d\n", sc->sc_dev.dv_xname,
2148 	    (uint16_t)psc->sc_lcsr, (psc->sc_lcsr & PCI_PCIE_LCSR_ASPM_L0S),
2149 	    (psc->sc_lcsr & PCI_PCIE_LCSR_ASPM_L1));
2150 
2151 	/* disable L0s and L1 */
2152 	pci_conf_write(psc->sc_pc, psc->sc_tag, psc->sc_cap_off + PCI_PCIE_LCSR,
2153 	    psc->sc_lcsr & ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1));
2154 
2155 	psc->sc_flags |= ATH11K_PCI_ASPM_RESTORE;
2156 }
2157 
2158 void
2159 qwx_pci_aspm_restore(struct qwx_softc *sc)
2160 {
2161 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2162 
2163 	if (psc->sc_flags & ATH11K_PCI_ASPM_RESTORE) {
2164 		pci_conf_write(psc->sc_pc, psc->sc_tag,
2165 		    psc->sc_cap_off + PCI_PCIE_LCSR, psc->sc_lcsr);
2166 		psc->sc_flags &= ~ATH11K_PCI_ASPM_RESTORE;
2167 	}
2168 }
2169 
2170 int
2171 qwx_pci_power_up(struct qwx_softc *sc)
2172 {
2173 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2174 	int error;
2175 
2176 	psc->register_window = 0;
2177 	clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, sc->sc_flags);
2178 
2179 	qwx_pci_sw_reset(sc, true);
2180 
2181 	/* Disable ASPM during firmware download due to problems switching
2182 	 * to AMSS state.
2183 	 */
2184 	qwx_pci_aspm_disable(sc);
2185 
2186 	qwx_pci_msi_enable(sc);
2187 
2188 	error = qwx_mhi_start(psc);
2189 	if (error)
2190 		return error;
2191 
2192 	if (sc->hw_params.static_window_map)
2193 		qwx_pci_select_static_window(sc);
2194 
2195 	return 0;
2196 }
2197 
2198 void
2199 qwx_pci_power_down(struct qwx_softc *sc)
2200 {
2201 	/* restore aspm in case firmware bootup fails */
2202 	qwx_pci_aspm_restore(sc);
2203 
2204 	qwx_pci_force_wake(sc);
2205 
2206 	qwx_pci_msi_disable(sc);
2207 
2208 	qwx_mhi_stop(sc);
2209 	clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, sc->sc_flags);
2210 	qwx_pci_sw_reset(sc, false);
2211 }
2212 
2213 /*
2214  * MHI
2215  */
2216 int
2217 qwx_mhi_register(struct qwx_softc *sc)
2218 {
2219 	DNPRINTF(QWX_D_MHI, "%s: STUB %s()\n", sc->sc_dev.dv_xname, __func__);
2220 	return 0;
2221 }
2222 
2223 void
2224 qwx_mhi_unregister(struct qwx_softc *sc)
2225 {
2226 	DNPRINTF(QWX_D_MHI, "%s: STUB %s()\n", sc->sc_dev.dv_xname, __func__);
2227 }
2228 
2229 // XXX MHI is GPLd - we provide a compatible bare-bones implementation
2230 #define MHI_CFG				0x10
2231 #define   MHI_CFG_NHWER_MASK		GENMASK(31, 24)
2232 #define   MHI_CFG_NHWER_SHFT		24
2233 #define   MHI_CFG_NER_MASK		GENMASK(23, 16)
2234 #define   MHI_CFG_NER_SHFT		16
2235 #define   MHI_CFG_NHWCH_MASK		GENMASK(15, 8)
2236 #define   MHI_CFG_NHWCH_SHFT		8
2237 #define   MHI_CFG_NCH_MASK		GENMASK(7, 0)
2238 #define MHI_CHDBOFF			0x18
2239 #define MHI_DEV_WAKE_DB			127
2240 #define MHI_ERDBOFF			0x20
2241 #define MHI_BHI_OFFSET			0x28
2242 #define   MHI_BHI_IMGADDR_LOW			0x08
2243 #define   MHI_BHI_IMGADDR_HIGH			0x0c
2244 #define   MHI_BHI_IMGSIZE			0x10
2245 #define   MHI_BHI_IMGTXDB			0x18
2246 #define   MHI_BHI_INTVEC			0x20
2247 #define   MHI_BHI_EXECENV			0x28
2248 #define   MHI_BHI_STATUS			0x2c
2249 #define	  MHI_BHI_SERIALNU			0x40
2250 #define MHI_BHIE_OFFSET			0x2c
2251 #define   MHI_BHIE_TXVECADDR_LOW_OFFS		0x2c
2252 #define   MHI_BHIE_TXVECADDR_HIGH_OFFS		0x30
2253 #define   MHI_BHIE_TXVECSIZE_OFFS		0x34
2254 #define   MHI_BHIE_TXVECDB_OFFS			0x3c
2255 #define   MHI_BHIE_TXVECSTATUS_OFFS		0x44
2256 #define   MHI_BHIE_RXVECADDR_LOW_OFFS		0x60
2257 #define   MHI_BHIE_RXVECSTATUS_OFFS		0x78
2258 #define MHI_CTRL			0x38
2259 #define    MHI_CTRL_READY_MASK			0x1
2260 #define    MHI_CTRL_RESET_MASK			0x2
2261 #define    MHI_CTRL_MHISTATE_MASK		GENMASK(15, 8)
2262 #define    MHI_CTRL_MHISTATE_SHFT		8
2263 #define MHI_STATUS			0x48
2264 #define    MHI_STATUS_MHISTATE_MASK		GENMASK(15, 8)
2265 #define    MHI_STATUS_MHISTATE_SHFT		8
2266 #define        MHI_STATE_RESET			0x0
2267 #define        MHI_STATE_READY			0x1
2268 #define        MHI_STATE_M0			0x2
2269 #define        MHI_STATE_M1			0x3
2270 #define        MHI_STATE_M2			0x4
2271 #define        MHI_STATE_M3			0x5
2272 #define        MHI_STATE_M3_FAST		0x6
2273 #define        MHI_STATE_BHI			0x7
2274 #define        MHI_STATE_SYS_ERR		0xff
2275 #define    MHI_STATUS_READY_MASK		0x1
2276 #define    MHI_STATUS_SYSERR_MASK		0x4
2277 #define MHI_CCABAP_LOWER		0x58
2278 #define MHI_CCABAP_HIGHER		0x5c
2279 #define MHI_ECABAP_LOWER		0x60
2280 #define MHI_ECABAP_HIGHER		0x64
2281 #define MHI_CRCBAP_LOWER		0x68
2282 #define MHI_CRCBAP_HIGHER		0x6c
2283 #define MHI_CRDB_LOWER			0x70
2284 #define MHI_CRDB_HIGHER			0x74
2285 #define MHI_CTRLBASE_LOWER		0x80
2286 #define MHI_CTRLBASE_HIGHER		0x84
2287 #define MHI_CTRLLIMIT_LOWER		0x88
2288 #define MHI_CTRLLIMIT_HIGHER		0x8c
2289 #define MHI_DATABASE_LOWER		0x98
2290 #define MHI_DATABASE_HIGHER		0x9c
2291 #define MHI_DATALIMIT_LOWER		0xa0
2292 #define MHI_DATALIMIT_HIGHER		0xa4
2293 
2294 #define MHI_EE_PBL	0x0	/* Primary Bootloader */
2295 #define MHI_EE_SBL	0x1	/* Secondary Bootloader */
2296 #define MHI_EE_AMSS	0x2	/* Modem, aka the primary runtime EE */
2297 #define MHI_EE_RDDM	0x3	/* Ram dump download mode */
2298 #define MHI_EE_WFW	0x4	/* WLAN firmware mode */
2299 #define MHI_EE_PTHRU	0x5	/* Passthrough */
2300 #define MHI_EE_EDL	0x6	/* Embedded downloader */
2301 #define MHI_EE_FP	0x7	/* Flash Programmer Environment */
2302 
2303 #define MHI_IN_PBL(e) (e == MHI_EE_PBL || e == MHI_EE_PTHRU || e == MHI_EE_EDL)
2304 #define MHI_POWER_UP_CAPABLE(e) (MHI_IN_PBL(e) || e == MHI_EE_AMSS)
2305 #define MHI_IN_MISSION_MODE(e) \
2306 	(e == MHI_EE_AMSS || e == MHI_EE_WFW || e == MHI_EE_FP)
2307 
2308 /* BHI register bits */
2309 #define MHI_BHI_TXDB_SEQNUM_BMSK	GENMASK(29, 0)
2310 #define MHI_BHI_TXDB_SEQNUM_SHFT	0
2311 #define MHI_BHI_STATUS_MASK		GENMASK(31, 30)
2312 #define MHI_BHI_STATUS_SHFT		30
2313 #define MHI_BHI_STATUS_ERROR		0x03
2314 #define MHI_BHI_STATUS_SUCCESS		0x02
2315 #define MHI_BHI_STATUS_RESET		0x00
2316 
2317 /* MHI BHIE registers */
2318 #define MHI_BHIE_MSMSOCID_OFFS		0x00
2319 #define MHI_BHIE_RXVECADDR_LOW_OFFS	0x60
2320 #define MHI_BHIE_RXVECADDR_HIGH_OFFS	0x64
2321 #define MHI_BHIE_RXVECSIZE_OFFS		0x68
2322 #define MHI_BHIE_RXVECDB_OFFS		0x70
2323 #define MHI_BHIE_RXVECSTATUS_OFFS	0x78
2324 
2325 /* BHIE register bits */
2326 #define MHI_BHIE_TXVECDB_SEQNUM_BMSK		GENMASK(29, 0)
2327 #define MHI_BHIE_TXVECDB_SEQNUM_SHFT		0
2328 #define MHI_BHIE_TXVECSTATUS_SEQNUM_BMSK	GENMASK(29, 0)
2329 #define MHI_BHIE_TXVECSTATUS_SEQNUM_SHFT	0
2330 #define MHI_BHIE_TXVECSTATUS_STATUS_BMSK	GENMASK(31, 30)
2331 #define MHI_BHIE_TXVECSTATUS_STATUS_SHFT	30
2332 #define MHI_BHIE_TXVECSTATUS_STATUS_RESET	0x00
2333 #define MHI_BHIE_TXVECSTATUS_STATUS_XFER_COMPL	0x02
2334 #define MHI_BHIE_TXVECSTATUS_STATUS_ERROR	0x03
2335 #define MHI_BHIE_RXVECDB_SEQNUM_BMSK		GENMASK(29, 0)
2336 #define MHI_BHIE_RXVECDB_SEQNUM_SHFT		0
2337 #define MHI_BHIE_RXVECSTATUS_SEQNUM_BMSK	GENMASK(29, 0)
2338 #define MHI_BHIE_RXVECSTATUS_SEQNUM_SHFT	0
2339 #define MHI_BHIE_RXVECSTATUS_STATUS_BMSK	GENMASK(31, 30)
2340 #define MHI_BHIE_RXVECSTATUS_STATUS_SHFT	30
2341 #define MHI_BHIE_RXVECSTATUS_STATUS_RESET	0x00
2342 #define MHI_BHIE_RXVECSTATUS_STATUS_XFER_COMPL	0x02
2343 #define MHI_BHIE_RXVECSTATUS_STATUS_ERROR	0x03
2344 
2345 #define MHI_EV_CC_INVALID	0x0
2346 #define MHI_EV_CC_SUCCESS	0x1
2347 #define MHI_EV_CC_EOT		0x2
2348 #define MHI_EV_CC_OVERFLOW	0x3
2349 #define MHI_EV_CC_EOB		0x4
2350 #define MHI_EV_CC_OOB		0x5
2351 #define MHI_EV_CC_DB_MODE	0x6
2352 #define MHI_EV_CC_UNDEFINED_ERR	0x10
2353 #define MHI_EV_CC_BAD_TRE	0x11
2354 
2355 #define MHI_CMD_NOP		01
2356 #define MHI_CMD_RESET_CHAN	16
2357 #define MHI_CMD_STOP_CHAN	17
2358 #define MHI_CMD_START_CHAN	18
2359 
2360 #define MHI_TRE_CMD_CHID_MASK	GENMASK(31, 24)
2361 #define MHI_TRE_CMD_CHID_SHFT	24
2362 #define MHI_TRE_CMD_CMDID_MASK	GENMASK(23, 16)
2363 #define MHI_TRE_CMD_CMDID_SHFT	16
2364 
2365 #define MHI_TRE0_EV_LEN_MASK	GENMASK(15, 0)
2366 #define MHI_TRE0_EV_LEN_SHFT	0
2367 #define MHI_TRE0_EV_CODE_MASK	GENMASK(31, 24)
2368 #define MHI_TRE0_EV_CODE_SHFT	24
2369 #define MHI_TRE1_EV_TYPE_MASK	GENMASK(23, 16)
2370 #define MHI_TRE1_EV_TYPE_SHFT	16
2371 #define MHI_TRE1_EV_CHID_MASK	GENMASK(31, 24)
2372 #define MHI_TRE1_EV_CHID_SHFT	24
2373 
2374 #define MHI_TRE0_DATA_LEN_MASK	GENMASK(15, 0)
2375 #define MHI_TRE0_DATA_LEN_SHFT	0
2376 #define MHI_TRE1_DATA_CHAIN	(1 << 0)
2377 #define MHI_TRE1_DATA_IEOB	(1 << 8)
2378 #define MHI_TRE1_DATA_IEOT	(1 << 9)
2379 #define MHI_TRE1_DATA_BEI	(1 << 10)
2380 #define MHI_TRE1_DATA_TYPE_MASK		GENMASK(23, 16)
2381 #define MHI_TRE1_DATA_TYPE_SHIFT	16
2382 #define MHI_TRE1_DATA_TYPE_TRANSFER	0x2
2383 
2384 #define MHI_PKT_TYPE_INVALID			0x00
2385 #define MHI_PKT_TYPE_NOOP_CMD			0x01
2386 #define MHI_PKT_TYPE_TRANSFER			0x02
2387 #define MHI_PKT_TYPE_COALESCING			0x08
2388 #define MHI_PKT_TYPE_RESET_CHAN_CMD		0x10
2389 #define MHI_PKT_TYPE_STOP_CHAN_CMD		0x11
2390 #define MHI_PKT_TYPE_START_CHAN_CMD		0x12
2391 #define MHI_PKT_TYPE_STATE_CHANGE_EVENT		0x20
2392 #define MHI_PKT_TYPE_CMD_COMPLETION_EVENT	0x21
2393 #define MHI_PKT_TYPE_TX_EVENT			0x22
2394 #define MHI_PKT_TYPE_RSC_TX_EVENT		0x28
2395 #define MHI_PKT_TYPE_EE_EVENT			0x40
2396 #define MHI_PKT_TYPE_TSYNC_EVENT		0x48
2397 #define MHI_PKT_TYPE_BW_REQ_EVENT		0x50
2398 
2399 
2400 #define MHI_DMA_VEC_CHUNK_SIZE			524288 /* 512 KB */
2401 struct qwx_dma_vec_entry {
2402 	uint64_t paddr;
2403 	uint64_t size;
2404 };
2405 
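/*
 * Ring an MHI doorbell. The 64-bit doorbell value is written as two
 * 32-bit MMIO writes, high word first.
 */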
2406 void
2407 qwx_mhi_ring_doorbell(struct qwx_softc *sc, uint64_t db_addr, uint64_t val)
2408 {
2409 	qwx_pci_write(sc, db_addr + 4, val >> 32);
2410 	qwx_pci_write(sc, db_addr, val & 0xffffffff);
2411 }
2412 
2413 void
2414 qwx_mhi_device_wake(struct qwx_softc *sc)
2415 {
2416 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2417 
2418 	/*
2419 	 * Device wake is async only for now because we do not
2420 	 * keep track of PM state in software.
2421 	 */
2422 	qwx_mhi_ring_doorbell(sc, psc->wake_db, 1);
2423 }
2424 
2425 void
2426 qwx_mhi_device_zzz(struct qwx_softc *sc)
2427 {
2428 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2429 
2430 	qwx_mhi_ring_doorbell(sc, psc->wake_db, 0);
2431 }
2432 
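/*
 * Check whether it is currently valid to ring or clear the device wake
 * doorbell; for now only the M0 state is considered valid.
 */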
2433 int
2434 qwx_mhi_wake_db_clear_valid(struct qwx_softc *sc)
2435 {
2436 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2437 
2438 	return (psc->mhi_state == MHI_STATE_M0); /* TODO other states? */
2439 }
2440 
2441 void
2442 qwx_mhi_init_xfer_rings(struct qwx_pci_softc *psc)
2443 {
2444 	struct qwx_softc *sc = &psc->sc_sc;
2445 	int i;
2446 	uint32_t chcfg;
2447 	struct qwx_pci_xfer_ring *ring;
2448 	struct qwx_mhi_chan_ctxt *cbase, *c;
2449 
2450 	cbase = (struct qwx_mhi_chan_ctxt *)QWX_DMA_KVA(psc->chan_ctxt);
2451 	for (i = 0; i < psc->max_chan; i++) {
2452 		c = &cbase[i];
2453 		chcfg = le32toh(c->chcfg);
2454 		chcfg &= ~(MHI_CHAN_CTX_CHSTATE_MASK |
2455 		    MHI_CHAN_CTX_BRSTMODE_MASK |
2456 		    MHI_CHAN_CTX_POLLCFG_MASK);
2457 		chcfg |= (MHI_CHAN_CTX_CHSTATE_DISABLED |
2458 		    (MHI_CHAN_CTX_BRSTMODE_DISABLE <<
2459 		    MHI_CHAN_CTX_BRSTMODE_SHFT));
2460 		c->chcfg = htole32(chcfg);
2461 		c->chtype = htole32(MHI_CHAN_TYPE_INVALID);
2462 		c->erindex = 0;
2463 	}
2464 
2465 	for (i = 0; i < nitems(psc->xfer_rings); i++) {
2466 		ring = &psc->xfer_rings[i];
2467 		KASSERT(ring->mhi_chan_id < psc->max_chan);
2468 		c = &cbase[ring->mhi_chan_id];
2469 		c->chtype = htole32(ring->mhi_chan_direction);
2470 		c->erindex = htole32(ring->mhi_chan_event_ring_index);
2471 		ring->chan_ctxt = c;
2472 	}
2473 
2474 	bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->chan_ctxt), 0,
2475 	    QWX_DMA_LEN(psc->chan_ctxt), BUS_DMASYNC_PREWRITE);
2476 }
2477 
2478 void
2479 qwx_mhi_init_event_rings(struct qwx_pci_softc *psc)
2480 {
2481 	struct qwx_softc *sc = &psc->sc_sc;
2482 	int i;
2483 	uint32_t intmod;
2484 	uint64_t paddr, len;
2485 	struct qwx_pci_event_ring *ring;
2486 	struct qwx_mhi_event_ctxt *c;
2487 
2488 	c = (struct qwx_mhi_event_ctxt *)QWX_DMA_KVA(psc->event_ctxt);
2489 	for (i = 0; i < nitems(psc->event_rings); i++, c++) {
2490 		ring = &psc->event_rings[i];
2491 
2492 		ring->event_ctxt = c;
2493 
2494 		intmod = le32toh(c->intmod);
2495 		intmod &= ~(MHI_EV_CTX_INTMODC_MASK | MHI_EV_CTX_INTMODT_MASK);
2496 		intmod |= (ring->mhi_er_irq_moderation_ms <<
2497 		    MHI_EV_CTX_INTMODT_SHFT) & MHI_EV_CTX_INTMODT_MASK;
2498 		c->intmod = htole32(intmod);
2499 
2500 		c->ertype = htole32(MHI_ER_TYPE_VALID);
2501 		c->msivec = htole32(ring->mhi_er_irq);
2502 
2503 		paddr = QWX_DMA_DVA(ring->dmamem);
2504 		ring->rp = paddr;
2505 		ring->wp = paddr + ring->size -
2506 		    sizeof(struct qwx_mhi_ring_element);
2507 		c->rbase = htole64(paddr);
2508 		c->rp = htole64(ring->rp);
2509 		c->wp = htole64(ring->wp);
2510 
2511 		len = sizeof(struct qwx_mhi_ring_element) * ring->num_elements;
2512 		c->rlen = htole64(len);
2513 	}
2514 
2515 	bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->event_ctxt), 0,
2516 	    QWX_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_PREWRITE);
2517 }
2518 
2519 void
2520 qwx_mhi_init_cmd_ring(struct qwx_pci_softc *psc)
2521 {
2522 	struct qwx_softc *sc = &psc->sc_sc;
2523 	struct qwx_pci_cmd_ring *ring = &psc->cmd_ring;
2524 	struct qwx_mhi_cmd_ctxt *c;
2525 	uint64_t paddr, len;
2526 
2527 	paddr = QWX_DMA_DVA(ring->dmamem);
2528 	len = ring->size;
2529 
2530 	ring->rp = ring->wp = paddr;
2531 
2532 	c = (struct qwx_mhi_cmd_ctxt *)QWX_DMA_KVA(psc->cmd_ctxt);
2533 	c->rbase = htole64(paddr);
2534 	c->rp = htole64(paddr);
2535 	c->wp = htole64(paddr);
2536 	c->rlen = htole64(len);
2537 
2538 	bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->cmd_ctxt), 0,
2539 	    QWX_DMA_LEN(psc->cmd_ctxt), BUS_DMASYNC_PREWRITE);
2540 }
2541 
2542 void
2543 qwx_mhi_init_dev_ctxt(struct qwx_pci_softc *psc)
2544 {
2545 	qwx_mhi_init_xfer_rings(psc);
2546 	qwx_mhi_init_event_rings(psc);
2547 	qwx_mhi_init_cmd_ring(psc);
2548 }
2549 
2550 void *
2551 qwx_pci_cmd_ring_get_elem(struct qwx_pci_cmd_ring *ring, uint64_t ptr)
2552 {
2553 	uint64_t base = QWX_DMA_DVA(ring->dmamem), offset;
2554 
2555 	if (ptr < base || ptr >= base + ring->size)
2556 		return NULL;
2557 
2558 	offset = ptr - base;
2559 	if (offset >= ring->size)
2560 		return NULL;
2561 
2562 	return QWX_DMA_KVA(ring->dmamem) + offset;
2563 }
2564 
2565 int
2566 qwx_mhi_cmd_ring_submit(struct qwx_pci_softc *psc,
2567     struct qwx_pci_cmd_ring *ring)
2568 {
2569 	struct qwx_softc *sc = &psc->sc_sc;
2570 	uint64_t base = QWX_DMA_DVA(ring->dmamem);
2571 	struct qwx_mhi_cmd_ctxt *c;
2572 
2573 	if (ring->queued >= ring->num_elements)
2574 		return 1;
2575 
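	/* Advance the write pointer, wrapping at the end of the ring. */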
2576 	if (ring->wp + sizeof(struct qwx_mhi_ring_element) >= base + ring->size)
2577 		ring->wp = base;
2578 	else
2579 		ring->wp += sizeof(struct qwx_mhi_ring_element);
2580 
2581 	bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->cmd_ctxt), 0,
2582 	    QWX_DMA_LEN(psc->cmd_ctxt), BUS_DMASYNC_POSTREAD);
2583 
2584 	c = (struct qwx_mhi_cmd_ctxt *)QWX_DMA_KVA(psc->cmd_ctxt);
2585 	c->wp = htole64(ring->wp);
2586 
2587 	bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->cmd_ctxt), 0,
2588 	    QWX_DMA_LEN(psc->cmd_ctxt), BUS_DMASYNC_PREWRITE);
2589 
2590 	ring->queued++;
2591 	qwx_mhi_ring_doorbell(sc, MHI_CRDB_LOWER, ring->wp);
2592 	return 0;
2593 }
2594 
2595 int
2596 qwx_mhi_send_cmd(struct qwx_pci_softc *psc, uint32_t cmd, uint32_t chan)
2597 {
2598 	struct qwx_softc *sc = &psc->sc_sc;
2599 	struct qwx_pci_cmd_ring	*ring = &psc->cmd_ring;
2600 	struct qwx_mhi_ring_element *e;
2601 
2602 	if (ring->queued >= ring->num_elements) {
2603 		printf("%s: command ring overflow\n", sc->sc_dev.dv_xname);
2604 		return 1;
2605 	}
2606 
2607 	e = qwx_pci_cmd_ring_get_elem(ring, ring->wp);
2608 	if (e == NULL)
2609 		return 1;
2610 
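	/* Command TREs carry no payload; chan and cmd IDs go into dword[1]. */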
2611 	e->ptr = 0ULL;
2612 	e->dword[0] = 0;
2613 	e->dword[1] = htole32(
2614 	    ((chan << MHI_TRE_CMD_CHID_SHFT) & MHI_TRE_CMD_CHID_MASK) |
2615 	    ((cmd << MHI_TRE_CMD_CMDID_SHFT) & MHI_TRE_CMD_CMDID_MASK));
2616 
2617 	return qwx_mhi_cmd_ring_submit(psc, ring);
2618 }
2619 
2620 void *
2621 qwx_pci_xfer_ring_get_elem(struct qwx_pci_xfer_ring *ring, uint64_t wp)
2622 {
2623 	uint64_t base = QWX_DMA_DVA(ring->dmamem), offset;
2624 	void *addr = QWX_DMA_KVA(ring->dmamem);
2625 
2626 	if (wp < base)
2627 		return NULL;
2628 
2629 	offset = wp - base;
2630 	if (offset >= ring->size)
2631 		return NULL;
2632 
2633 	return addr + offset;
2634 }
2635 
2636 struct qwx_xfer_data *
2637 qwx_pci_xfer_ring_get_data(struct qwx_pci_xfer_ring *ring, uint64_t wp)
2638 {
2639 	uint64_t base = QWX_DMA_DVA(ring->dmamem), offset;
2640 
2641 	if (wp < base)
2642 		return NULL;
2643 
2644 	offset = wp - base;
2645 	if (offset >= ring->size)
2646 		return NULL;
2647 
2648 	return &ring->data[offset / sizeof(ring->data[0])];
2649 }
2650 
2651 int
2652 qwx_mhi_submit_xfer(struct qwx_softc *sc, struct mbuf *m)
2653 {
2654 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2655 	struct qwx_pci_xfer_ring *ring;
2656 	struct qwx_mhi_ring_element *e;
2657 	struct qwx_xfer_data *xfer;
2658 	uint64_t paddr, base;
2659 	int err;
2660 
2661 	ring = &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_OUTBOUND];
2662 
2663 	if (ring->queued >= ring->num_elements)
2664 		return 1;
2665 
2666 	if (m->m_pkthdr.len > QWX_PCI_XFER_MAX_DATA_SIZE) {
2667 		/* TODO: chunk xfers */
2668 		printf("%s: xfer too large: %d bytes\n", __func__,
2669 		    m->m_pkthdr.len);
2670 		return 1;
2671 	}
2672 
2673 	e = qwx_pci_xfer_ring_get_elem(ring, ring->wp);
2674 	if (e == NULL)
2675 		return 1;
2676 
2677 	xfer = qwx_pci_xfer_ring_get_data(ring, ring->wp);
2678 	if (xfer == NULL || xfer->m != NULL)
2679 		return 1;
2680 
2681 	err = bus_dmamap_load_mbuf(sc->sc_dmat, xfer->map, m,
2682 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
2683 	if (err && err != EFBIG) {
2684 		printf("%s: can't map mbuf (error %d)\n",
2685 		    sc->sc_dev.dv_xname, err);
2686 		return err;
2687 	}
2688 	if (err) {
2689 		/* Too many DMA segments, linearize mbuf. */
2690 		if (m_defrag(m, M_DONTWAIT))
2691 			return ENOBUFS;
2692 		err = bus_dmamap_load_mbuf(sc->sc_dmat, xfer->map, m,
2693 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
2694 		if (err) {
2695 			printf("%s: can't map mbuf (error %d)\n",
2696 			    sc->sc_dev.dv_xname, err);
2697 			return err;
2698 		}
2699 	}
2700 
2701 	bus_dmamap_sync(sc->sc_dmat, xfer->map, 0, m->m_pkthdr.len,
2702 	    BUS_DMASYNC_PREWRITE);
2703 
2704 	xfer->m = m;
2705 	paddr = xfer->map->dm_segs[0].ds_addr;
2706 
2707 	e->ptr = htole64(paddr);
2708 	e->dword[0] = htole32((m->m_pkthdr.len << MHI_TRE0_DATA_LEN_SHFT) &
2709 	    MHI_TRE0_DATA_LEN_MASK);
2710 	e->dword[1] = htole32(MHI_TRE1_DATA_IEOT |
2711 	    MHI_TRE1_DATA_TYPE_TRANSFER << MHI_TRE1_DATA_TYPE_SHIFT);
2712 
2713 	bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(ring->dmamem),
2714 	    0, QWX_DMA_LEN(ring->dmamem), BUS_DMASYNC_PREWRITE);
2715 
2716 	base = QWX_DMA_DVA(ring->dmamem);
2717 	if (ring->wp + sizeof(struct qwx_mhi_ring_element) >= base + ring->size)
2718 		ring->wp = base;
2719 	else
2720 		ring->wp += sizeof(struct qwx_mhi_ring_element);
2721 	ring->queued++;
2722 
2723 	ring->chan_ctxt->wp = htole64(ring->wp);
2724 
2725 	bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->chan_ctxt), 0,
2726 	    QWX_DMA_LEN(psc->chan_ctxt), BUS_DMASYNC_PREWRITE);
2727 
2728 	qwx_mhi_ring_doorbell(sc, ring->db_addr, ring->wp);
2729 	return 0;
2730 }
2731 
2732 int
2733 qwx_mhi_start_channel(struct qwx_pci_softc *psc,
2734 	struct qwx_pci_xfer_ring *ring)
2735 {
2736 	struct qwx_softc *sc = &psc->sc_sc;
2737 	struct qwx_mhi_chan_ctxt *c;
2738 	int ret = 0;
2739 	uint32_t chcfg;
2740 	uint64_t paddr, len;
2741 
2742 	DNPRINTF(QWX_D_MHI, "%s: start MHI channel %d in state %d\n", __func__,
2743 	    ring->mhi_chan_id, ring->mhi_chan_state);
2744 
2745 	c = ring->chan_ctxt;
2746 
2747 	chcfg = le32toh(c->chcfg);
2748 	chcfg &= ~MHI_CHAN_CTX_CHSTATE_MASK;
2749 	chcfg |= MHI_CHAN_CTX_CHSTATE_ENABLED;
2750 	c->chcfg = htole32(chcfg);
2751 
2752 	paddr = QWX_DMA_DVA(ring->dmamem);
2753 	ring->rp = ring->wp = paddr;
2754 	c->rbase = htole64(paddr);
2755 	c->rp = htole64(ring->rp);
2756 	c->wp = htole64(ring->wp);
2757 	len = sizeof(struct qwx_mhi_ring_element) * ring->num_elements;
2758 	c->rlen = htole64(len);
2759 
2760 	bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->chan_ctxt), 0,
2761 	    QWX_DMA_LEN(psc->chan_ctxt), BUS_DMASYNC_PREWRITE);
2762 
2763 	ring->cmd_status = MHI_EV_CC_INVALID;
2764 	if (qwx_mhi_send_cmd(psc, MHI_CMD_START_CHAN, ring->mhi_chan_id))
2765 		return 1;
2766 
2767 	while (ring->cmd_status != MHI_EV_CC_SUCCESS) {
2768 		ret = tsleep_nsec(&ring->cmd_status, 0, "qwxcmd",
2769 		    SEC_TO_NSEC(5));
2770 		if (ret)
2771 			break;
2772 	}
2773 
2774 	if (ret) {
2775 		printf("%s: could not start MHI channel %d in state %d: status 0x%x\n",
2776 		    sc->sc_dev.dv_xname, ring->mhi_chan_id,
2777 		    ring->mhi_chan_state, ring->cmd_status);
2778 		return 1;
2779 	}
2780 
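	/*
	 * For an inbound channel, post a receive buffer for every ring
	 * element and let the device know by ringing the doorbell.
	 */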
2781 	if (ring->mhi_chan_direction == MHI_CHAN_TYPE_INBOUND) {
2782 		uint64_t wp = QWX_DMA_DVA(ring->dmamem);
2783 		int i;
2784 
2785 		for (i = 0; i < ring->num_elements; i++) {
2786 			struct qwx_mhi_ring_element *e;
2787 			struct qwx_xfer_data *xfer;
2788 			uint64_t paddr;
2789 
2790 			e = qwx_pci_xfer_ring_get_elem(ring, wp);
2791 			xfer = qwx_pci_xfer_ring_get_data(ring, wp);
2792 			paddr = xfer->map->dm_segs[0].ds_addr;
2793 
2794 			e->ptr = htole64(paddr);
2795 			e->dword[0] = htole32((QWX_PCI_XFER_MAX_DATA_SIZE <<
2796 			    MHI_TRE0_DATA_LEN_SHFT) &
2797 			    MHI_TRE0_DATA_LEN_MASK);
2798 			e->dword[1] = htole32(MHI_TRE1_DATA_IEOT |
2799 			    MHI_TRE1_DATA_BEI |
2800 			    MHI_TRE1_DATA_TYPE_TRANSFER <<
2801 			    MHI_TRE1_DATA_TYPE_SHIFT);
2802 
2803 			ring->wp = wp;
2804 			wp += sizeof(*e);
2805 		}
2806 
2807 		bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(ring->dmamem), 0,
2808 		    QWX_DMA_LEN(ring->dmamem), BUS_DMASYNC_PREWRITE);
2809 
2810 		qwx_mhi_ring_doorbell(sc, ring->db_addr, ring->wp);
2811 	}
2812 
2813 	return 0;
2814 }
2815 
2816 int
2817 qwx_mhi_start_channels(struct qwx_pci_softc *psc)
2818 {
2819 	struct qwx_pci_xfer_ring *ring;
2820 	int ret = 0;
2821 
2822 	qwx_mhi_device_wake(&psc->sc_sc);
2823 
2824 	ring = &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_OUTBOUND];
2825 	if (qwx_mhi_start_channel(psc, ring)) {
2826 		ret = 1;
2827 		goto done;
2828 	}
2829 
2830 	ring = &psc->xfer_rings[QWX_PCI_XFER_RING_IPCR_INBOUND];
2831 	if (qwx_mhi_start_channel(psc, ring))
2832 		ret = 1;
2833 done:
2834 	qwx_mhi_device_zzz(&psc->sc_sc);
2835 	return ret;
2836 }
2837 
2838 int
2839 qwx_mhi_start(struct qwx_pci_softc *psc)
2840 {
2841 	struct qwx_softc *sc = &psc->sc_sc;
2842 	uint32_t off;
2843 	uint32_t ee, state;
2844 	int ret;
2845 
2846 	qwx_mhi_init_dev_ctxt(psc);
2847 
2848 	psc->bhi_off = qwx_pci_read(sc, MHI_BHI_OFFSET);
2849 	DNPRINTF(QWX_D_MHI, "%s: BHI offset 0x%x\n", __func__, psc->bhi_off);
2850 
2851 	psc->bhie_off = qwx_pci_read(sc, MHI_BHIE_OFFSET);
2852 	DNPRINTF(QWX_D_MHI, "%s: BHIE offset 0x%x\n", __func__, psc->bhie_off);
2853 
2854 	/* Clean BHIE RX registers */
2855 	for (off = MHI_BHIE_RXVECADDR_LOW_OFFS;
2856 	     off < (MHI_BHIE_RXVECSTATUS_OFFS - 4);
2857 	     off += 4)
2858 		qwx_pci_write(sc, psc->bhie_off + off, 0x0);
2859 
2860 	qwx_rddm_prepare(psc);
2861 
2862 	/* Program BHI INTVEC */
2863 	qwx_pci_write(sc, psc->bhi_off + MHI_BHI_INTVEC, 0x00);
2864 
2865 	/*
2866 	 * Get BHI execution environment and confirm that it is valid
2867 	 * for power on.
2868 	 */
2869 	ee = qwx_pci_read(sc, psc->bhi_off + MHI_BHI_EXECENV);
2870 	if (!MHI_POWER_UP_CAPABLE(ee)) {
2871 		printf("%s: invalid EE for power on: 0x%x\n",
2872 		     sc->sc_dev.dv_xname, ee);
2873 		return 1;
2874 	}
2875 
2876 	/*
2877 	 * Get MHI state of the device and reset it if it is in system
2878 	 * error.
2879 	 */
2880 	state = qwx_pci_read(sc, MHI_STATUS);
2881 	DNPRINTF(QWX_D_MHI, "%s: MHI power on with EE: 0x%x, status: 0x%x\n",
2882 	     sc->sc_dev.dv_xname, ee, state);
2883 	state = (state & MHI_STATUS_MHISTATE_MASK) >> MHI_STATUS_MHISTATE_SHFT;
2884 	if (state == MHI_STATE_SYS_ERR) {
2885 		if (qwx_mhi_reset_device(sc, 0))
2886 			return 1;
2887 		state = qwx_pci_read(sc, MHI_STATUS);
2888 		DNPRINTF(QWX_D_MHI, "%s: MHI state after reset: 0x%x\n",
2889 		    sc->sc_dev.dv_xname, state);
2890 		state = (state & MHI_STATUS_MHISTATE_MASK) >>
2891 		    MHI_STATUS_MHISTATE_SHFT;
2892 		if (state == MHI_STATE_SYS_ERR) {
2893 			printf("%s: MHI stuck in system error state\n",
2894 			    sc->sc_dev.dv_xname);
2895 			return 1;
2896 		}
2897 	}
2898 
2899 	psc->bhi_ee = ee;
2900 	psc->mhi_state = state;
2901 
2902 #if notyet
2903 	/* Enable IRQs */
2904 	//  XXX todo?
2905 #endif
2906 
2907 	/* Transition to primary runtime. */
2908 	if (MHI_IN_PBL(ee)) {
2909 		ret = qwx_mhi_fw_load_handler(psc);
2910 		if (ret)
2911 			return ret;
2912 
2913 		/* XXX without this delay starting the channels may fail */
2914 		delay(1000);
2915 		qwx_mhi_start_channels(psc);
2916 	} else {
2917 		/* XXX Handle partially initialized device...?!? */
2918 		ee = qwx_pci_read(sc, psc->bhi_off + MHI_BHI_EXECENV);
2919 		if (!MHI_IN_MISSION_MODE(ee)) {
2920 			printf("%s: failed to power up MHI, ee=0x%x\n",
2921 			    sc->sc_dev.dv_xname, ee);
2922 			return EIO;
2923 		}
2924 	}
2925 
2926 	return 0;
2927 }
2928 
2929 void
2930 qwx_mhi_stop(struct qwx_softc *sc)
2931 {
2932 	qwx_mhi_reset_device(sc, 1);
2933 }
2934 
2935 int
2936 qwx_mhi_reset_device(struct qwx_softc *sc, int force)
2937 {
2938 	struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
2939 	uint32_t reg;
2940 	int ret = 0;
2941 
2942 	reg = qwx_pcic_read32(sc, MHI_STATUS);
2943 
2944 	DNPRINTF(QWX_D_MHI, "%s: MHISTATUS 0x%x\n", sc->sc_dev.dv_xname, reg);
2945 	/*
2946 	 * Observed on QCA6390: after SOC_GLOBAL_RESET, MHISTATUS has
2947 	 * the SYSERR bit set, so MHICTRL_RESET must be set in order
2948 	 * to clear SYSERR.
2949 	 */
2950 	if (force || (reg & MHI_STATUS_SYSERR_MASK)) {
2951 		/* Trigger MHI Reset in device. */
2952 		qwx_pcic_write32(sc, MHI_CTRL, MHI_CTRL_RESET_MASK);
2953 
2954 		/* Wait for the reset bit to be cleared by the device. */
2955 		ret = qwx_mhi_await_device_reset(sc);
2956 		if (ret)
2957 			return ret;
2958 
2959 		if (psc->bhi_off == 0)
2960 			psc->bhi_off = qwx_pci_read(sc, MHI_BHI_OFFSET);
2961 
2962 		/* The device cleared BHI INTVEC, so re-program it. */
2963 		qwx_pci_write(sc, psc->bhi_off + MHI_BHI_INTVEC, 0x00);
2964 	}
2965 
2966 	return 0;
2967 }
2968 
2969 static inline void
2970 qwx_mhi_reset_txvecdb(struct qwx_softc *sc)
2971 {
2972 	qwx_pcic_write32(sc, PCIE_TXVECDB, 0);
2973 }
2974 
2975 static inline void
2976 qwx_mhi_reset_txvecstatus(struct qwx_softc *sc)
2977 {
2978 	qwx_pcic_write32(sc, PCIE_TXVECSTATUS, 0);
2979 }
2980 
2981 static inline void
2982 qwx_mhi_reset_rxvecdb(struct qwx_softc *sc)
2983 {
2984 	qwx_pcic_write32(sc, PCIE_RXVECDB, 0);
2985 }
2986 
2987 static inline void
2988 qwx_mhi_reset_rxvecstatus(struct qwx_softc *sc)
2989 {
2990 	qwx_pcic_write32(sc, PCIE_RXVECSTATUS, 0);
2991 }
2992 
2993 void
2994 qwx_mhi_clear_vector(struct qwx_softc *sc)
2995 {
2996 	qwx_mhi_reset_txvecdb(sc);
2997 	qwx_mhi_reset_txvecstatus(sc);
2998 	qwx_mhi_reset_rxvecdb(sc);
2999 	qwx_mhi_reset_rxvecstatus(sc);
3000 }
3001 
3002 int
3003 qwx_mhi_fw_load_handler(struct qwx_pci_softc *psc)
3004 {
3005 	struct qwx_softc *sc = &psc->sc_sc;
3006 	int ret;
3007 	char amss_path[PATH_MAX];
3008 	u_char *data;
3009 	size_t len;
3010 
3011 	if (sc->fw_img[QWX_FW_AMSS].data) {
3012 		data = sc->fw_img[QWX_FW_AMSS].data;
3013 		len = sc->fw_img[QWX_FW_AMSS].size;
3014 	} else {
3015 		ret = snprintf(amss_path, sizeof(amss_path), "%s-%s-%s",
3016 		    ATH11K_FW_DIR, sc->hw_params.fw.dir, ATH11K_AMSS_FILE);
3017 		if (ret < 0 || ret >= sizeof(amss_path))
3018 			return ENOSPC;
3019 
3020 		ret = loadfirmware(amss_path, &data, &len);
3021 		if (ret) {
3022 			printf("%s: could not read %s (error %d)\n",
3023 			    sc->sc_dev.dv_xname, amss_path, ret);
3024 			return ret;
3025 		}
3026 
3027 		if (len < MHI_DMA_VEC_CHUNK_SIZE) {
3028 			printf("%s: %s is too short, have only %zu bytes\n",
3029 			    sc->sc_dev.dv_xname, amss_path, len);
3030 			free(data, M_DEVBUF, len);
3031 			return EINVAL;
3032 		}
3033 
3034 		sc->fw_img[QWX_FW_AMSS].data = data;
3035 		sc->fw_img[QWX_FW_AMSS].size = len;
3036 	}
3037 
3038 	/* Second-stage boot loader sits in the first 512 KB of image. */
3039 	ret = qwx_mhi_fw_load_bhi(psc, data, MHI_DMA_VEC_CHUNK_SIZE);
3040 	if (ret != 0) {
3041 		printf("%s: could not load firmware %s\n",
3042 		    sc->sc_dev.dv_xname, amss_path);
3043 		return ret;
3044 	}
3045 
3046 	/* Now load the full image. */
3047 	ret = qwx_mhi_fw_load_bhie(psc, data, len);
3048 	if (ret != 0) {
3049 		printf("%s: could not load firmware %s\n",
3050 		    sc->sc_dev.dv_xname, amss_path);
3051 		return ret;
3052 	}
3053 
3054 	while (psc->bhi_ee < MHI_EE_AMSS) {
3055 		ret = tsleep_nsec(&psc->bhi_ee, 0, "qwxamss",
3056 		    SEC_TO_NSEC(5));
3057 		if (ret)
3058 			break;
3059 	}
3060 	if (ret != 0) {
3061 		printf("%s: device failed to enter AMSS EE\n",
3062 		    sc->sc_dev.dv_xname);
3063 	}
3064 
3065 	return ret;
3066 }
3067 
3068 int
3069 qwx_mhi_await_device_reset(struct qwx_softc *sc)
3070 {
3071 	const uint32_t msecs = 24, retries = 2;
3072 	uint32_t reg;
3073 	int timeout;
3074 
3075 	/* Poll for CTRL RESET to clear. */
3076 	timeout = retries;
3077 	while (timeout > 0) {
3078 		reg = qwx_pci_read(sc, MHI_CTRL);
3079 		DNPRINTF(QWX_D_MHI, "%s: MHI_CTRL is 0x%x\n", __func__, reg);
3080 		if ((reg & MHI_CTRL_RESET_MASK) == 0)
3081 			break;
3082 		DELAY((msecs / retries) * 1000);
3083 		timeout--;
3084 	}
3085 	if (timeout == 0) {
3086 		DNPRINTF(QWX_D_MHI, "%s: MHI reset failed\n", __func__);
3087 		return ETIMEDOUT;
3088 	}
3089 
3090 	return 0;
3091 }
3092 
3093 int
3094 qwx_mhi_await_device_ready(struct qwx_softc *sc)
3095 {
3096 	uint32_t reg;
3097 	int timeout;
3098 	const uint32_t msecs = 2000, retries = 4;
3099 
3100 
3101 	/* Poll for READY to be set. */
3102 	timeout = retries;
3103 	while (timeout > 0) {
3104 		reg = qwx_pci_read(sc, MHI_STATUS);
3105 		DNPRINTF(QWX_D_MHI, "%s: MHI_STATUS is 0x%x\n", __func__, reg);
3106 		if (reg & MHI_STATUS_READY_MASK) {
3107 			reg &= ~MHI_STATUS_READY_MASK;
3108 			qwx_pci_write(sc, MHI_STATUS, reg);
3109 			break;
3110 		}
3111 		DELAY((msecs / retries) * 1000);
3112 		timeout--;
3113 	}
3114 	if (timeout == 0) {
3115 		printf("%s: MHI not ready\n", sc->sc_dev.dv_xname);
3116 		return ETIMEDOUT;
3117 	}
3118 
3119 	return 0;
3120 }
3121 
3122 void
3123 qwx_mhi_ready_state_transition(struct qwx_pci_softc *psc)
3124 {
3125 	struct qwx_softc *sc = &psc->sc_sc;
3126 	int ret, i;
3127 
3128 	ret = qwx_mhi_await_device_reset(sc);
3129 	if (ret)
3130 		return;
3131 
3132 	ret = qwx_mhi_await_device_ready(sc);
3133 	if (ret)
3134 		return;
3135 
3136 	/* Set up memory-mapped IO for channels, events, etc. */
3137 	qwx_mhi_init_mmio(psc);
3138 
3139 	/* Notify event rings. */
3140 	for (i = 0; i < nitems(psc->event_rings); i++) {
3141 		struct qwx_pci_event_ring *ring = &psc->event_rings[i];
3142 		qwx_mhi_ring_doorbell(sc, ring->db_addr, ring->wp);
3143 	}
3144 
3145 	/*
3146 	 * Set the device into M0 state. The device will transition
3147 	 * into M0 and the execution environment will switch to SBL.
3148 	 */
3149 	qwx_mhi_set_state(sc, MHI_STATE_M0);
3150 }
3151 
3152 void
3153 qwx_mhi_mission_mode_state_transition(struct qwx_pci_softc *psc)
3154 {
3155 	struct qwx_softc *sc = &psc->sc_sc;
3156 	int i;
3157 
3158 	qwx_mhi_device_wake(sc);
3159 
3160 	/* Notify event rings. */
3161 	for (i = 0; i < nitems(psc->event_rings); i++) {
3162 		struct qwx_pci_event_ring *ring = &psc->event_rings[i];
3163 		qwx_mhi_ring_doorbell(sc, ring->db_addr, ring->wp);
3164 	}
3165 
3166 	/* TODO: Notify transfer/command rings? */
3167 
3168 	qwx_mhi_device_zzz(sc);
3169 }
3170 
3171 void
3172 qwx_mhi_low_power_mode_state_transition(struct qwx_pci_softc *psc)
3173 {
3174 	struct qwx_softc *sc = &psc->sc_sc;
3175 
3176 	qwx_mhi_set_state(sc, MHI_STATE_M2);
3177 }
3178 
3179 void
3180 qwx_mhi_set_state(struct qwx_softc *sc, uint32_t state)
3181 {
3182 	uint32_t reg;
3183 
3184 	reg = qwx_pci_read(sc, MHI_CTRL);
3185 
3186 	if (state != MHI_STATE_RESET) {
3187 		reg &= ~MHI_CTRL_MHISTATE_MASK;
3188 		reg |= (state << MHI_CTRL_MHISTATE_SHFT) & MHI_CTRL_MHISTATE_MASK;
3189 	} else
3190 		reg |= MHI_CTRL_RESET_MASK;
3191 
3192 	qwx_pci_write(sc, MHI_CTRL, reg);
3193 }
3194 
3195 void
3196 qwx_mhi_init_mmio(struct qwx_pci_softc *psc)
3197 {
3198 	struct qwx_softc *sc = &psc->sc_sc;
3199 	uint64_t paddr;
3200 	uint32_t reg;
3201 	int i;
3202 
3203 	reg = qwx_pci_read(sc, MHI_CHDBOFF);
3204 
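	/* Channel doorbells are 8 bytes apart, starting at the CHDBOFF offset. */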
3205 	/* Set device wake doorbell address. */
3206 	psc->wake_db = reg + 8 * MHI_DEV_WAKE_DB;
3207 
3208 	/* Set doorbell address for each transfer ring. */
3209 	for (i = 0; i < nitems(psc->xfer_rings); i++) {
3210 		struct qwx_pci_xfer_ring *ring = &psc->xfer_rings[i];
3211 		ring->db_addr = reg + (8 * ring->mhi_chan_id);
3212 	}
3213 
3214 	reg = qwx_pci_read(sc, MHI_ERDBOFF);
3215 	/* Set doorbell address for each event ring. */
3216 	for (i = 0; i < nitems(psc->event_rings); i++) {
3217 		struct qwx_pci_event_ring *ring = &psc->event_rings[i];
3218 		ring->db_addr = reg + (8 * i);
3219 	}
3220 
3221 	paddr = QWX_DMA_DVA(psc->chan_ctxt);
3222 	qwx_pci_write(sc, MHI_CCABAP_HIGHER, paddr >> 32);
3223 	qwx_pci_write(sc, MHI_CCABAP_LOWER, paddr & 0xffffffff);
3224 
3225 	paddr = QWX_DMA_DVA(psc->event_ctxt);
3226 	qwx_pci_write(sc, MHI_ECABAP_HIGHER, paddr >> 32);
3227 	qwx_pci_write(sc, MHI_ECABAP_LOWER, paddr & 0xffffffff);
3228 
3229 	paddr = QWX_DMA_DVA(psc->cmd_ctxt);
3230 	qwx_pci_write(sc, MHI_CRCBAP_HIGHER, paddr >> 32);
3231 	qwx_pci_write(sc, MHI_CRCBAP_LOWER, paddr & 0xffffffff);
3232 
3233 	/* Not (yet?) using fixed memory space from a device-tree. */
3234 	qwx_pci_write(sc, MHI_CTRLBASE_HIGHER, 0);
3235 	qwx_pci_write(sc, MHI_CTRLBASE_LOWER, 0);
3236 	qwx_pci_write(sc, MHI_DATABASE_HIGHER, 0);
3237 	qwx_pci_write(sc, MHI_DATABASE_LOWER, 0);
3238 	qwx_pci_write(sc, MHI_CTRLLIMIT_HIGHER, 0x0);
3239 	qwx_pci_write(sc, MHI_CTRLLIMIT_LOWER, 0xffffffff);
3240 	qwx_pci_write(sc, MHI_DATALIMIT_HIGHER, 0x0);
3241 	qwx_pci_write(sc, MHI_DATALIMIT_LOWER, 0xffffffff);
3242 
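	/*
	 * Advertise the number of software event rings in use
	 * (QWX_NUM_EVENT_CTX); hardware event rings are not used,
	 * so NHWER stays zero.
	 */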
3243 	reg = qwx_pci_read(sc, MHI_CFG);
3244 	reg &= ~(MHI_CFG_NER_MASK | MHI_CFG_NHWER_MASK);
3245 	reg |= QWX_NUM_EVENT_CTX << MHI_CFG_NER_SHFT;
3246 	qwx_pci_write(sc, MHI_CFG, reg);
3247 }
3248 
3249 int
3250 qwx_mhi_fw_load_bhi(struct qwx_pci_softc *psc, uint8_t *data, size_t len)
3251 {
3252 	struct qwx_softc *sc = &psc->sc_sc;
3253 	struct qwx_dmamem *data_adm;
3254 	uint32_t seq, reg, status = MHI_BHI_STATUS_RESET;
3255 	uint64_t paddr;
3256 	int ret;
3257 
3258 	data_adm = qwx_dmamem_alloc(sc->sc_dmat, len, 0);
3259 	if (data_adm == NULL) {
3260 		printf("%s: could not allocate BHI DMA data buffer\n",
3261 		    sc->sc_dev.dv_xname);
3262 		return 1;
3263 	}
3264 
3265 	/* Copy firmware image to DMA memory. */
3266 	memcpy(QWX_DMA_KVA(data_adm), data, len);
3267 
3268 	qwx_pci_write(sc, psc->bhi_off + MHI_BHI_STATUS, 0);
3269 
3270 	/* Set data physical address and length. */
3271 	paddr = QWX_DMA_DVA(data_adm);
3272 	qwx_pci_write(sc, psc->bhi_off + MHI_BHI_IMGADDR_HIGH, paddr >> 32);
3273 	qwx_pci_write(sc, psc->bhi_off + MHI_BHI_IMGADDR_LOW,
3274 	    paddr & 0xffffffff);
3275 	qwx_pci_write(sc, psc->bhi_off + MHI_BHI_IMGSIZE, len);
3276 
3277 	/* Set a random transaction sequence number. */
3278 	do {
3279 		seq = arc4random_uniform(MHI_BHI_TXDB_SEQNUM_BMSK);
3280 	} while (seq == 0);
3281 	qwx_pci_write(sc, psc->bhi_off + MHI_BHI_IMGTXDB, seq);
3282 
3283 	/* Wait for completion. */
3284 	ret = 0;
3285 	while (status != MHI_BHI_STATUS_SUCCESS && psc->bhi_ee < MHI_EE_SBL) {
3286 		ret = tsleep_nsec(&psc->bhi_ee, 0, "qwxbhi", SEC_TO_NSEC(5));
3287 		if (ret)
3288 			break;
3289 		reg = qwx_pci_read(sc, psc->bhi_off + MHI_BHI_STATUS);
3290 		status = (reg & MHI_BHI_STATUS_MASK) >> MHI_BHI_STATUS_SHFT;
3291 	}
3292 
3293 	if (ret) {
3294 		printf("%s: BHI load timeout\n", sc->sc_dev.dv_xname);
3295 		reg = qwx_pci_read(sc, psc->bhi_off + MHI_BHI_STATUS);
3296 		status = (reg & MHI_BHI_STATUS_MASK) >> MHI_BHI_STATUS_SHFT;
3297 		DNPRINTF(QWX_D_MHI, "%s: BHI status is 0x%x EE is 0x%x\n",
3298 		    __func__, status, psc->bhi_ee);
3299 	}
3300 
3301 	qwx_dmamem_free(sc->sc_dmat, data_adm);
3302 	return ret;
3303 }
3304 
3305 int
3306 qwx_mhi_fw_load_bhie(struct qwx_pci_softc *psc, uint8_t *data, size_t len)
3307 {
3308 	struct qwx_softc *sc = &psc->sc_sc;
3309 	struct qwx_dma_vec_entry *vec;
3310 	uint32_t seq, reg, state = MHI_BHIE_TXVECSTATUS_STATUS_RESET;
3311 	uint64_t paddr;
3312 	const size_t chunk_size = MHI_DMA_VEC_CHUNK_SIZE;
3313 	size_t nseg, remain, vec_size;
3314 	int i, ret;
3315 
3316 	nseg = howmany(len, chunk_size);
3317 	if (nseg == 0) {
3318 		printf("%s: BHIE data too short, have only %zu bytes\n",
3319 		    sc->sc_dev.dv_xname, len);
3320 		return 1;
3321 	}
3322 
3323 	if (psc->amss_data == NULL || QWX_DMA_LEN(psc->amss_data) < len) {
3324 		if (psc->amss_data)
3325 			qwx_dmamem_free(sc->sc_dmat, psc->amss_data);
3326 		psc->amss_data = qwx_dmamem_alloc(sc->sc_dmat, len, 0);
3327 		if (psc->amss_data == NULL) {
3328 			printf("%s: could not allocate BHIE DMA data buffer\n",
3329 			    sc->sc_dev.dv_xname);
3330 			return 1;
3331 		}
3332 	}
3333 
3334 	vec_size = nseg * sizeof(*vec);
3335 	if (psc->amss_vec == NULL || QWX_DMA_LEN(psc->amss_vec) < vec_size) {
3336 		if (psc->amss_vec)
3337 			qwx_dmamem_free(sc->sc_dmat, psc->amss_vec);
3338 		psc->amss_vec = qwx_dmamem_alloc(sc->sc_dmat, vec_size, 0);
3339 		if (psc->amss_vec == NULL) {
3340 			printf("%s: could not allocate BHIE DMA vec buffer\n",
3341 			    sc->sc_dev.dv_xname);
3342 			qwx_dmamem_free(sc->sc_dmat, psc->amss_data);
3343 			psc->amss_data = NULL;
3344 			return 1;
3345 		}
3346 	}
3347 
3348 	/* Copy firmware image to DMA memory. */
3349 	memcpy(QWX_DMA_KVA(psc->amss_data), data, len);
3350 
3351 	/* Create vector which controls chunk-wise DMA copy in hardware. */
3352 	paddr = QWX_DMA_DVA(psc->amss_data);
3353 	vec = QWX_DMA_KVA(psc->amss_vec);
3354 	remain = len;
3355 	for (i = 0; i < nseg; i++) {
3356 		vec[i].paddr = paddr;
3357 		if (remain >= chunk_size) {
3358 			vec[i].size = chunk_size;
3359 			remain -= chunk_size;
3360 			paddr += chunk_size;
3361 		} else
3362 			vec[i].size = remain;
3363 	}
3364 
3365 	/* Set vector physical address and length. */
3366 	paddr = QWX_DMA_DVA(psc->amss_vec);
3367 	qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECADDR_HIGH_OFFS,
3368 	    paddr >> 32);
3369 	qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECADDR_LOW_OFFS,
3370 	    paddr & 0xffffffff);
3371 	qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECSIZE_OFFS, vec_size);
3372 
3373 	/* Set a random transaction sequence number. */
3374 	do {
3375 		seq = arc4random_uniform(MHI_BHIE_TXVECSTATUS_SEQNUM_BMSK);
3376 	} while (seq == 0);
3377 	reg = qwx_pci_read(sc, psc->bhie_off + MHI_BHIE_TXVECDB_OFFS);
3378 	reg &= ~MHI_BHIE_TXVECDB_SEQNUM_BMSK;
3379 	reg |= seq << MHI_BHIE_TXVECDB_SEQNUM_SHFT;
3380 	qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECDB_OFFS, reg);
3381 
3382 	/* Wait for completion. */
3383 	ret = 0;
3384 	while (state != MHI_BHIE_TXVECSTATUS_STATUS_XFER_COMPL) {
3385 		ret = tsleep_nsec(&psc->bhie_off, 0, "qwxbhie",
3386 		    SEC_TO_NSEC(5));
3387 		if (ret)
3388 			break;
3389 		reg = qwx_pci_read(sc,
3390 		    psc->bhie_off + MHI_BHIE_TXVECSTATUS_OFFS);
3391 		state = (reg & MHI_BHIE_TXVECSTATUS_STATUS_BMSK) >>
3392 		    MHI_BHIE_TXVECSTATUS_STATUS_SHFT;
3393 		DNPRINTF(QWX_D_MHI, "%s: txvec state is 0x%x\n", __func__,
3394 		    state);
3395 	}
3396 
3397 	if (ret) {
3398 		printf("%s: BHIE load timeout\n", sc->sc_dev.dv_xname);
3399 		return ret;
3400 	}
3401 	return 0;
3402 }
3403 
3404 void
3405 qwx_rddm_prepare(struct qwx_pci_softc *psc)
3406 {
3407 	struct qwx_softc *sc = &psc->sc_sc;
3408 	struct qwx_dma_vec_entry *vec;
3409 	struct qwx_dmamem *data_adm, *vec_adm;
3410 	uint32_t seq, reg;
3411 	uint64_t paddr;
3412 	const size_t len = QWX_RDDM_DUMP_SIZE;
3413 	const size_t chunk_size = MHI_DMA_VEC_CHUNK_SIZE;
3414 	size_t nseg, remain, vec_size;
3415 	int i;
3416 
3417 	nseg = howmany(len, chunk_size);
3418 	if (nseg == 0) {
3419 		printf("%s: RDDM data too short, have only %zu bytes\n",
3420 		    sc->sc_dev.dv_xname, len);
3421 		return;
3422 	}
3423 
3424 	data_adm = qwx_dmamem_alloc(sc->sc_dmat, len, 0);
3425 	if (data_adm == NULL) {
3426 		printf("%s: could not allocate BHIE DMA data buffer\n",
3427 		    sc->sc_dev.dv_xname);
3428 		return;
3429 	}
3430 
3431 	vec_size = nseg * sizeof(*vec);
3432 	vec_adm = qwx_dmamem_alloc(sc->sc_dmat, vec_size, 0);
3433 	if (vec_adm == NULL) {
3434 		printf("%s: could not allocate BHIE DMA vector buffer\n",
3435 		    sc->sc_dev.dv_xname);
3436 		qwx_dmamem_free(sc->sc_dmat, data_adm);
3437 		return;
3438 	}
3439 
3440 	/* Create vector which controls chunk-wise DMA copy from hardware. */
3441 	paddr = QWX_DMA_DVA(data_adm);
3442 	vec = QWX_DMA_KVA(vec_adm);
3443 	remain = len;
3444 	for (i = 0; i < nseg; i++) {
3445 		vec[i].paddr = paddr;
3446 		if (remain >= chunk_size) {
3447 			vec[i].size = chunk_size;
3448 			remain -= chunk_size;
3449 			paddr += chunk_size;
3450 		} else
3451 			vec[i].size = remain;
3452 	}
3453 
3454 	/* Set vector physical address and length. */
3455 	paddr = QWX_DMA_DVA(vec_adm);
3456 	qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECADDR_HIGH_OFFS,
3457 	    paddr >> 32);
3458 	qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECADDR_LOW_OFFS,
3459 	    paddr & 0xffffffff);
3460 	qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECSIZE_OFFS, vec_size);
3461 
3462 	/* Set a random transaction sequence number. */
3463 	do {
3464 		seq = arc4random_uniform(MHI_BHIE_RXVECSTATUS_SEQNUM_BMSK);
3465 	} while (seq == 0);
3466 
3467 	reg = qwx_pci_read(sc, psc->bhie_off + MHI_BHIE_RXVECDB_OFFS);
3468 	reg &= ~MHI_BHIE_RXVECDB_SEQNUM_BMSK;
3469 	reg |= seq << MHI_BHIE_RXVECDB_SEQNUM_SHFT;
3470 	qwx_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECDB_OFFS, reg);
3471 
3472 	psc->rddm_data = data_adm;
3473 	psc->rddm_vec = vec_adm;
3474 }
3475 
3476 #ifdef QWX_DEBUG
3477 void
3478 qwx_rddm_task(void *arg)
3479 {
3480 	struct qwx_pci_softc *psc = arg;
3481 	struct qwx_softc *sc = &psc->sc_sc;
3482 	uint32_t reg, state = MHI_BHIE_RXVECSTATUS_STATUS_RESET;
3483 	const size_t len = QWX_RDDM_DUMP_SIZE;
3484 	int i, timeout;
3485 	const uint32_t msecs = 100, retries = 20;
3486 	uint8_t *rddm;
3487 	struct nameidata nd;
3488 	struct vnode *vp = NULL;
3489 	struct iovec iov[3];
3490 	struct uio uio;
3491 	char path[PATH_MAX];
3492 	int error = 0;
3493 
3494 	if (psc->rddm_data == NULL) {
3495 		DPRINTF("%s: RDDM not prepared\n", __func__);
3496 		return;
3497 	}
3498 
3499 	/* Poll for completion */
3500 	timeout = retries;
3501 	while (timeout > 0 && state != MHI_BHIE_RXVECSTATUS_STATUS_XFER_COMPL) {
3502 		reg = qwx_pci_read(sc,
3503 		    psc->bhie_off + MHI_BHIE_RXVECSTATUS_OFFS);
3504 		state = (reg & MHI_BHIE_RXVECSTATUS_STATUS_BMSK) >>
3505 		    MHI_BHIE_RXVECSTATUS_STATUS_SHFT;
3506 		DPRINTF("%s: rxvec state is 0x%x\n", __func__, state);
3507 		DELAY((msecs / retries) * 1000);
3508 		timeout--;
3509 	}
3510 
3511 	if (timeout == 0) {
3512 		DPRINTF("%s: RDDM dump failed\n", sc->sc_dev.dv_xname);
3513 		return;
3514 	}
3515 
3516 	rddm = QWX_DMA_KVA(psc->rddm_data);
3517 	DPRINTF("%s: RDDM snippet:\n", __func__);
3518 	for (i = 0; i < MIN(64, len); i++) {
3519 		DPRINTF("%s %.2x", i % 16 == 0 ? "\n" : "", rddm[i]);
3520 	}
3521 	DPRINTF("\n");
3522 
3523 	DPRINTF("%s: sleeping for 30 seconds to allow userland to boot\n", __func__);
3524 	tsleep_nsec(&psc->rddm_data, 0, "qwxrddm", SEC_TO_NSEC(30));
3525 
3526 	snprintf(path, sizeof(path), "/root/%s-rddm.bin", sc->sc_dev.dv_xname);
3527 	DPRINTF("%s: saving RDDM to %s\n", __func__, path);
3528 	NDINIT(&nd, 0, 0, UIO_SYSSPACE, path, curproc);
3529 	nd.ni_pledge = PLEDGE_CPATH | PLEDGE_WPATH;
3530 	nd.ni_unveil = UNVEIL_CREATE | UNVEIL_WRITE;
3531 	error = vn_open(&nd, FWRITE | O_CREAT | O_NOFOLLOW | O_TRUNC,
3532 	    S_IRUSR | S_IWUSR);
3533 	if (error) {
3534 		DPRINTF("%s: vn_open: error %d\n", __func__, error);
3535 		goto done;
3536 	}
3537 	vp = nd.ni_vp;
3538 	VOP_UNLOCK(vp);
3539 
3540 	iov[0].iov_base = (void *)rddm;
3541 	iov[0].iov_len = len;
3542 	iov[1].iov_len = 0;
3543 	uio.uio_iov = &iov[0];
3544 	uio.uio_offset = 0;
3545 	uio.uio_segflg = UIO_SYSSPACE;
3546 	uio.uio_rw = UIO_WRITE;
3547 	uio.uio_resid = len;
3548 	uio.uio_iovcnt = 1;
3549 	uio.uio_procp = curproc;
3550 	error = vget(vp, LK_EXCLUSIVE | LK_RETRY);
3551 	if (error) {
3552 		DPRINTF("%s: vget: error %d\n", __func__, error);
3553 		goto done;
3554 	}
3555 	error = VOP_WRITE(vp, &uio, IO_UNIT|IO_APPEND, curproc->p_ucred);
3556 	vput(vp);
3557 	if (error)
3558 		DPRINTF("%s: VOP_WRITE: error %d\n", __func__, error);
3559 	#if 0
3560 	error = vn_close(vp, FWRITE, curproc->p_ucred, curproc);
3561 	if (error)
3562 		DPRINTF("%s: vn_close: error %d\n", __func__, error);
3563 	#endif
3564 done:
3565 	qwx_dmamem_free(sc->sc_dmat, psc->rddm_data);
3566 	qwx_dmamem_free(sc->sc_dmat, psc->rddm_vec);
3567 	psc->rddm_data = NULL;
3568 	psc->rddm_vec = NULL;
3569 	DPRINTF("%s: done, error %d\n", __func__, error);
3570 }
3571 #endif
3572 
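/*
 * Translate a device-side ring pointer (bus address) into a kernel
 * virtual address within the event ring, with bounds checking.
 */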
3573 void *
3574 qwx_pci_event_ring_get_elem(struct qwx_pci_event_ring *ring, uint64_t rp)
3575 {
3576 	uint64_t base = QWX_DMA_DVA(ring->dmamem), offset;
3577 	void *addr = QWX_DMA_KVA(ring->dmamem);
3578 
3579 	if (rp < base)
3580 		return NULL;
3581 
3582 	offset = rp - base;
3583 	if (offset >= ring->size)
3584 		return NULL;
3585 
3586 	return addr + offset;
3587 }
3588 
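/*
 * Process execution environment (EE) and MHI state change events.
 * MHI state transitions invoke the corresponding ready/mission-mode/
 * low-power handlers, and threads sleeping on either value are woken
 * up when it changes.
 */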
3589 void
3590 qwx_mhi_state_change(struct qwx_pci_softc *psc, int ee, int mhi_state)
3591 {
3592 	struct qwx_softc *sc = &psc->sc_sc;
3593 	uint32_t old_ee = psc->bhi_ee;
3594 	uint32_t old_mhi_state = psc->mhi_state;
3595 
3596 	if (ee != -1 && psc->bhi_ee != ee) {
3597 		switch (ee) {
3598 		case MHI_EE_PBL:
3599 			DNPRINTF(QWX_D_MHI, "%s: new EE PBL\n",
3600 			    sc->sc_dev.dv_xname);
3601 			psc->bhi_ee = ee;
3602 			break;
3603 		case MHI_EE_SBL:
3604 			DNPRINTF(QWX_D_MHI, "%s: new EE SBL\n",
3605 			    sc->sc_dev.dv_xname);
3606 			psc->bhi_ee = ee;
3607 			break;
3608 		case MHI_EE_AMSS:
3609 			DNPRINTF(QWX_D_MHI, "%s: new EE AMSS\n",
3610 			    sc->sc_dev.dv_xname);
3611 			psc->bhi_ee = ee;
3612 			/* Wake thread loading the full AMSS image. */
3613 			wakeup(&psc->bhie_off);
3614 			break;
3615 		case MHI_EE_WFW:
3616 			DNPRINTF(QWX_D_MHI, "%s: new EE WFW\n",
3617 			    sc->sc_dev.dv_xname);
3618 			psc->bhi_ee = ee;
3619 			break;
3620 		default:
3621 			printf("%s: unhandled EE change to %x\n",
3622 			    sc->sc_dev.dv_xname, ee);
3623 			break;
3624 		}
3625 	}
3626 
3627 	if (mhi_state != -1 && psc->mhi_state != mhi_state) {
3628 		switch (mhi_state) {
3629 		case -1:
3630 			break;
3631 		case MHI_STATE_RESET:
3632 			DNPRINTF(QWX_D_MHI, "%s: new MHI state RESET\n",
3633 			    sc->sc_dev.dv_xname);
3634 			psc->mhi_state = mhi_state;
3635 			break;
3636 		case MHI_STATE_READY:
3637 			DNPRINTF(QWX_D_MHI, "%s: new MHI state READY\n",
3638 			    sc->sc_dev.dv_xname);
3639 			psc->mhi_state = mhi_state;
3640 			qwx_mhi_ready_state_transition(psc);
3641 			break;
3642 		case MHI_STATE_M0:
3643 			DNPRINTF(QWX_D_MHI, "%s: new MHI state M0\n",
3644 			    sc->sc_dev.dv_xname);
3645 			psc->mhi_state = mhi_state;
3646 			qwx_mhi_mission_mode_state_transition(psc);
3647 			break;
3648 		case MHI_STATE_M1:
3649 			DNPRINTF(QWX_D_MHI, "%s: new MHI state M1\n",
3650 			    sc->sc_dev.dv_xname);
3651 			psc->mhi_state = mhi_state;
3652 			qwx_mhi_low_power_mode_state_transition(psc);
3653 			break;
3654 		case MHI_STATE_SYS_ERR:
3655 			DNPRINTF(QWX_D_MHI,
3656 			    "%s: new MHI state SYS ERR\n",
3657 			    sc->sc_dev.dv_xname);
3658 			psc->mhi_state = mhi_state;
3659 			break;
3660 		default:
3661 			printf("%s: unhandled MHI state change to %x\n",
3662 			    sc->sc_dev.dv_xname, mhi_state);
3663 			break;
3664 		}
3665 	}
3666 
3667 	if (old_ee != psc->bhi_ee)
3668 		wakeup(&psc->bhi_ee);
3669 	if (old_mhi_state != psc->mhi_state)
3670 		wakeup(&psc->mhi_state);
3671 }
3672 
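/* Handle an MHI state change event seen on the control event ring. */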
3673 void
3674 qwx_pci_intr_ctrl_event_mhi(struct qwx_pci_softc *psc, uint32_t mhi_state)
3675 {
3676 	DNPRINTF(QWX_D_MHI, "%s: MHI state change 0x%x -> 0x%x\n", __func__,
3677 	    psc->mhi_state, mhi_state);
3678 
3679 	if (psc->mhi_state != mhi_state)
3680 		qwx_mhi_state_change(psc, -1, mhi_state);
3681 }
3682 
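/*
 * Handle an execution environment (EE) change event seen on the
 * control event ring.
 */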
3683 void
3684 qwx_pci_intr_ctrl_event_ee(struct qwx_pci_softc *psc, uint32_t ee)
3685 {
3686 	DNPRINTF(QWX_D_MHI, "%s: EE change 0x%x to 0x%x\n", __func__,
3687 	    psc->bhi_ee, ee);
3688 
3689 	if (psc->bhi_ee != ee)
3690 		qwx_mhi_state_change(psc, ee, -1);
3691 }
3692 
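/*
 * Handle a command completion event. The completed command ring element
 * names the MHI channel it applies to; the completion status is stored
 * in the matching transfer ring and any thread waiting on it is woken
 * up. The command ring's read pointer is then advanced past the
 * completed element.
 */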
3693 void
3694 qwx_pci_intr_ctrl_event_cmd_complete(struct qwx_pci_softc *psc,
3695     uint64_t ptr, uint32_t cmd_status)
3696 {
3697 	struct qwx_pci_cmd_ring	*cmd_ring = &psc->cmd_ring;
3698 	uint64_t base = QWX_DMA_DVA(cmd_ring->dmamem);
3699 	struct qwx_pci_xfer_ring *xfer_ring = NULL;
3700 	struct qwx_mhi_ring_element *e;
3701 	uint32_t tre1, chid;
3702 	size_t i;
3703 
3704 	e = qwx_pci_cmd_ring_get_elem(cmd_ring, ptr);
3705 	if (e == NULL)
3706 		return;
3707 
3708 	tre1 = le32toh(e->dword[1]);
3709 	chid = (tre1 & MHI_TRE1_EV_CHID_MASK) >> MHI_TRE1_EV_CHID_SHFT;
3710 
3711 	for (i = 0; i < nitems(psc->xfer_rings); i++) {
3712 		if (psc->xfer_rings[i].mhi_chan_id == chid) {
3713 			xfer_ring = &psc->xfer_rings[i];
3714 			break;
3715 		}
3716 	}
3717 	if (xfer_ring == NULL) {
3718 		printf("%s: no transfer ring found for command completion "
3719 		    "on channel %u\n", __func__, chid);
3720 		return;
3721 	}
3722 
3723 	xfer_ring->cmd_status = cmd_status;
3724 	wakeup(&xfer_ring->cmd_status);
3725 
3726 	if (cmd_ring->rp + sizeof(*e) >= base + cmd_ring->size)
3727 		cmd_ring->rp = base;
3728 	else
3729 		cmd_ring->rp += sizeof(*e);
3730 }
3731 
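/*
 * Process pending elements on the control event ring: MHI state
 * changes, EE changes, and command completions. Ring pointers are
 * advanced as elements are consumed, the new write pointer is stored
 * in the event context, and the ring's doorbell is rung to notify the
 * device. Returns 1 if the ring was processed, 0 otherwise.
 */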
3732 int
3733 qwx_pci_intr_ctrl_event(struct qwx_pci_softc *psc, struct qwx_pci_event_ring *ring)
3734 {
3735 	struct qwx_softc *sc = &psc->sc_sc;
3736 	struct qwx_mhi_event_ctxt *c;
3737 	uint64_t rp, wp, base;
3738 	struct qwx_mhi_ring_element *e;
3739 	uint32_t tre0, tre1, type, code, chid, len;
3740 
3741 	c = ring->event_ctxt;
3742 	if (c == NULL) {
3743 		/*
3744 		 * Interrupts can trigger before mhi_init_event_rings()
3745 		 * if the device is still active after a warm reboot.
3746 		 */
3747 		return 0;
3748 	}
3749 
3750 	bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->event_ctxt), 0,
3751 	    QWX_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_POSTREAD);
3752 
3753 	rp = le64toh(c->rp);
3754 	wp = le64toh(c->wp);
3755 
3756 	DNPRINTF(QWX_D_MHI, "%s: kernel rp=0x%llx\n", __func__, ring->rp);
3757 	DNPRINTF(QWX_D_MHI, "%s: device rp=0x%llx\n", __func__, rp);
3758 	DNPRINTF(QWX_D_MHI, "%s: kernel wp=0x%llx\n", __func__, ring->wp);
3759 	DNPRINTF(QWX_D_MHI, "%s: device wp=0x%llx\n", __func__, wp);
3760 
3761 	base = QWX_DMA_DVA(ring->dmamem);
3762 	if (ring->rp == rp || rp < base || rp >= base + ring->size)
3763 		return 0;
3764 	if (wp < base || wp >= base + ring->size)
3765 		return 0;
3766 
3767 	bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(ring->dmamem),
3768 	    0, QWX_DMA_LEN(ring->dmamem), BUS_DMASYNC_POSTREAD);
3769 
3770 	while (ring->rp != rp) {
3771 		e = qwx_pci_event_ring_get_elem(ring, ring->rp);
3772 		if (e == NULL)
3773 			return 0;
3774 
3775 		tre0 = le32toh(e->dword[0]);
3776 		tre1 = le32toh(e->dword[1]);
3777 
3778 		len = (tre0 & MHI_TRE0_EV_LEN_MASK) >> MHI_TRE0_EV_LEN_SHFT;
3779 		code = (tre0 & MHI_TRE0_EV_CODE_MASK) >> MHI_TRE0_EV_CODE_SHFT;
3780 		type = (tre1 & MHI_TRE1_EV_TYPE_MASK) >> MHI_TRE1_EV_TYPE_SHFT;
3781 		chid = (tre1 & MHI_TRE1_EV_CHID_MASK) >> MHI_TRE1_EV_CHID_SHFT;
3782 		DNPRINTF(QWX_D_MHI, "%s: len=%u code=0x%x type=0x%x chid=%d\n",
3783 		    __func__, len, code, type, chid);
3784 
3785 		switch (type) {
3786 		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
3787 			qwx_pci_intr_ctrl_event_mhi(psc, code);
3788 			break;
3789 		case MHI_PKT_TYPE_EE_EVENT:
3790 			qwx_pci_intr_ctrl_event_ee(psc, code);
3791 			break;
3792 		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
3793 			qwx_pci_intr_ctrl_event_cmd_complete(psc,
3794 			    le64toh(e->ptr), code);
3795 			break;
3796 		default:
3797 			printf("%s: unhandled event type 0x%x\n",
3798 			    __func__, type);
3799 			break;
3800 		}
3801 
3802 		if (ring->rp + sizeof(*e) >= base + ring->size)
3803 			ring->rp = base;
3804 		else
3805 			ring->rp += sizeof(*e);
3806 
3807 		if (ring->wp + sizeof(*e) >= base + ring->size)
3808 			ring->wp = base;
3809 		else
3810 			ring->wp += sizeof(*e);
3811 	}
3812 
3813 	c->wp = htole64(ring->wp);
3814 
3815 	bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->event_ctxt), 0,
3816 	    QWX_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_PREWRITE);
3817 
3818 	qwx_mhi_ring_doorbell(sc, ring->db_addr, ring->wp);
3819 	return 1;
3820 }
3821 
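/*
 * Handle a transfer completion (TX) event on an MHI channel. The event
 * points at the last transfer ring element the device has completed;
 * only the MHI_EV_CC_EOT completion code is handled. Completed inbound
 * elements are handed to qwx_qrtr_recv_msg() and their mbufs are reused
 * in place; completed outbound elements have their mbufs unloaded and
 * freed. For inbound rings the updated write pointer is published and
 * the channel's doorbell is rung.
 */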
3822 void
3823 qwx_pci_intr_data_event_tx(struct qwx_pci_softc *psc, struct qwx_mhi_ring_element *e)
3824 {
3825 	struct qwx_softc *sc = &psc->sc_sc;
3826 	struct qwx_pci_xfer_ring *ring;
3827 	struct qwx_xfer_data *xfer;
3828 	uint64_t rp, evrp, base, paddr;
3829 	uint32_t tre0, tre1, code, chid, evlen, len;
3830 	int i;
3831 
3832 	tre0 = le32toh(e->dword[0]);
3833 	tre1 = le32toh(e->dword[1]);
3834 
3835 	evlen = (tre0 & MHI_TRE0_EV_LEN_MASK) >> MHI_TRE0_EV_LEN_SHFT;
3836 	code = (tre0 & MHI_TRE0_EV_CODE_MASK) >> MHI_TRE0_EV_CODE_SHFT;
3837 	chid = (tre1 & MHI_TRE1_EV_CHID_MASK) >> MHI_TRE1_EV_CHID_SHFT;
3838 
3839 	switch (code) {
3840 	case MHI_EV_CC_EOT:
3841 		for (i = 0; i < nitems(psc->xfer_rings); i++) {
3842 			ring = &psc->xfer_rings[i];
3843 			if (ring->mhi_chan_id == chid)
3844 				break;
3845 		}
3846 		if (i == nitems(psc->xfer_rings)) {
3847 			printf("%s: unhandled channel 0x%x\n",
3848 			    __func__, chid);
3849 			break;
3850 		}
3851 		base = QWX_DMA_DVA(ring->dmamem);
3852 		/* PTR contains the entry that was last written */
3853 		evrp = letoh64(e->ptr);
3854 		rp = evrp;
3855 		if (rp < base || rp >= base + ring->size) {
3856 			printf("%s: invalid ptr 0x%llx\n",
3857 			    __func__, rp);
3858 			break;
3859 		}
3860 		/* Point rp to next empty slot */
3861 		if (rp + sizeof(*e) >= base + ring->size)
3862 			rp = base;
3863 		else
3864 			rp += sizeof(*e);
3865 		/* Parse until next empty slot */
3866 		while (ring->rp != rp) {
3867 			DNPRINTF(QWX_D_MHI, "%s:%d: ring->rp 0x%llx "
3868 			    "ring->wp 0x%llx rp 0x%llx\n", __func__,
3869 			    __LINE__, ring->rp, ring->wp, rp);
3870 			e = qwx_pci_xfer_ring_get_elem(ring, ring->rp);
3871 			xfer = qwx_pci_xfer_ring_get_data(ring, ring->rp);
3872 
3873 			if (ring->rp == evrp)
3874 				len = evlen;
3875 			else
3876 				len = xfer->m->m_pkthdr.len;
3877 
3878 			bus_dmamap_sync(sc->sc_dmat, xfer->map, 0,
3879 			    xfer->m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
3880 #ifdef QWX_DEBUG
3881 			{
3882 			int i;
3883 			DNPRINTF(QWX_D_MHI, "%s: chan %u data (len %u): ",
3884 			    __func__,
3885 			    ring->mhi_chan_id, len);
3886 			for (i = 0; i < MIN(32, len); i++) {
3887 				DNPRINTF(QWX_D_MHI, "%02x ",
3888 				    (unsigned char)mtod(xfer->m, caddr_t)[i]);
3889 			}
3890 			if (i < len)
3891 				DNPRINTF(QWX_D_MHI, "...");
3892 			DNPRINTF(QWX_D_MHI, "\n");
3893 			}
3894 #endif
3895 			if (ring->mhi_chan_direction == MHI_CHAN_TYPE_INBOUND) {
3896 				/* Save m_data as upper layers use m_adj(9) */
3897 				void *o_data = xfer->m->m_data;
3898 
3899 				/* Pass mbuf to upper layers */
3900 				qwx_qrtr_recv_msg(sc, xfer->m);
3901 
3902 				/* Reset RX mbuf instead of free/alloc */
3903 				KASSERT(xfer->m->m_next == NULL);
3904 				xfer->m->m_data = o_data;
3905 				xfer->m->m_len = xfer->m->m_pkthdr.len =
3906 				    QWX_PCI_XFER_MAX_DATA_SIZE;
3907 
3908 				paddr = xfer->map->dm_segs[0].ds_addr;
3909 
3910 				e->ptr = htole64(paddr);
3911 				e->dword[0] = htole32((
3912 				    QWX_PCI_XFER_MAX_DATA_SIZE <<
3913 				    MHI_TRE0_DATA_LEN_SHFT) &
3914 				    MHI_TRE0_DATA_LEN_MASK);
3915 				e->dword[1] = htole32(MHI_TRE1_DATA_IEOT |
3916 				    MHI_TRE1_DATA_BEI |
3917 				    MHI_TRE1_DATA_TYPE_TRANSFER <<
3918 				    MHI_TRE1_DATA_TYPE_SHIFT);
3919 
3920 				if (ring->wp + sizeof(*e) >= base + ring->size)
3921 					ring->wp = base;
3922 				else
3923 					ring->wp += sizeof(*e);
3924 			} else {
3925 				/* Unload and free TX mbuf */
3926 				bus_dmamap_unload(sc->sc_dmat, xfer->map);
3927 				m_freem(xfer->m);
3928 				xfer->m = NULL;
3929 				ring->queued--;
3930 			}
3931 
3932 			if (ring->rp + sizeof(*e) >= base + ring->size)
3933 				ring->rp = base;
3934 			else
3935 				ring->rp += sizeof(*e);
3936 		}
3937 
3938 		if (ring->mhi_chan_direction == MHI_CHAN_TYPE_INBOUND) {
3939 			ring->chan_ctxt->wp = htole64(ring->wp);
3940 
3941 			bus_dmamap_sync(sc->sc_dmat,
3942 			    QWX_DMA_MAP(psc->chan_ctxt), 0,
3943 			    QWX_DMA_LEN(psc->chan_ctxt),
3944 			    BUS_DMASYNC_PREWRITE);
3945 
3946 			qwx_mhi_ring_doorbell(sc, ring->db_addr, ring->wp);
3947 		}
3948 		break;
3949 	default:
3950 		printf("%s: unhandled event code 0x%x\n",
3951 		    __func__, code);
3952 	}
3953 }
3954 
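/*
 * Process pending elements on the data event ring. Only transfer
 * completion (TX) events are handled. Ring pointers are advanced and
 * the doorbell is rung as in qwx_pci_intr_ctrl_event().
 * Returns 1 if the ring was processed, 0 otherwise.
 */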
3955 int
3956 qwx_pci_intr_data_event(struct qwx_pci_softc *psc, struct qwx_pci_event_ring *ring)
3957 {
3958 	struct qwx_softc *sc = &psc->sc_sc;
3959 	struct qwx_mhi_event_ctxt *c;
3960 	uint64_t rp, wp, base;
3961 	struct qwx_mhi_ring_element *e;
3962 	uint32_t tre0, tre1, type, code, chid, len;
3963 
3964 	c = ring->event_ctxt;
3965 	if (c == NULL) {
3966 		/*
3967 		 * Interrupts can trigger before mhi_init_event_rings()
3968 		 * if the device is still active after a warm reboot.
3969 		 */
3970 		return 0;
3971 	}
3972 
3973 	bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->event_ctxt), 0,
3974 	    QWX_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_POSTREAD);
3975 
3976 	rp = le64toh(c->rp);
3977 	wp = le64toh(c->wp);
3978 
3979 	DNPRINTF(QWX_D_MHI, "%s: kernel rp=0x%llx\n", __func__, ring->rp);
3980 	DNPRINTF(QWX_D_MHI, "%s: device rp=0x%llx\n", __func__, rp);
3981 	DNPRINTF(QWX_D_MHI, "%s: kernel wp=0x%llx\n", __func__, ring->wp);
3982 	DNPRINTF(QWX_D_MHI, "%s: device wp=0x%llx\n", __func__, wp);
3983 
3984 	base = QWX_DMA_DVA(ring->dmamem);
3985 	if (ring->rp == rp || rp < base || rp >= base + ring->size)
3986 		return 0;
3987 
3988 	bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(ring->dmamem),
3989 	    0, QWX_DMA_LEN(ring->dmamem), BUS_DMASYNC_POSTREAD);
3990 
3991 	while (ring->rp != rp) {
3992 		e = qwx_pci_event_ring_get_elem(ring, ring->rp);
3993 		if (e == NULL)
3994 			return 0;
3995 
3996 		tre0 = le32toh(e->dword[0]);
3997 		tre1 = le32toh(e->dword[1]);
3998 
3999 		len = (tre0 & MHI_TRE0_EV_LEN_MASK) >> MHI_TRE0_EV_LEN_SHFT;
4000 		code = (tre0 & MHI_TRE0_EV_CODE_MASK) >> MHI_TRE0_EV_CODE_SHFT;
4001 		type = (tre1 & MHI_TRE1_EV_TYPE_MASK) >> MHI_TRE1_EV_TYPE_SHFT;
4002 		chid = (tre1 & MHI_TRE1_EV_CHID_MASK) >> MHI_TRE1_EV_CHID_SHFT;
4003 		DNPRINTF(QWX_D_MHI, "%s: len=%u code=0x%x type=0x%x chid=%d\n",
4004 		    __func__, len, code, type, chid);
4005 
4006 		switch (type) {
4007 		case MHI_PKT_TYPE_TX_EVENT:
4008 			qwx_pci_intr_data_event_tx(psc, e);
4009 			break;
4010 		default:
4011 			printf("%s: unhandled event type 0x%x\n",
4012 			    __func__, type);
4013 			break;
4014 		}
4015 
4016 		if (ring->rp + sizeof(*e) >= base + ring->size)
4017 			ring->rp = base;
4018 		else
4019 			ring->rp += sizeof(*e);
4020 
4021 		if (ring->wp + sizeof(*e) >= base + ring->size)
4022 			ring->wp = base;
4023 		else
4024 			ring->wp += sizeof(*e);
4025 	}
4026 
4027 	c->wp = htole64(ring->wp);
4028 
4029 	bus_dmamap_sync(sc->sc_dmat, QWX_DMA_MAP(psc->event_ctxt), 0,
4030 	    QWX_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_PREWRITE);
4031 
4032 	qwx_mhi_ring_doorbell(sc, ring->db_addr, ring->wp);
4033 	return 1;
4034 }
4035 
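/* Interrupt handler dedicated to the MHI control event ring. */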
4036 int
4037 qwx_pci_intr_mhi_ctrl(void *arg)
4038 {
4039 	struct qwx_pci_softc *psc = arg;
4040 
4041 	if (qwx_pci_intr_ctrl_event(psc, &psc->event_rings[0]))
4042 		return 1;
4043 
4044 	return 0;
4045 }
4046 
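/* Interrupt handler dedicated to the MHI data event ring. */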
4047 int
4048 qwx_pci_intr_mhi_data(void *arg)
4049 {
4050 	struct qwx_pci_softc *psc = arg;
4051 
4052 	if (qwx_pci_intr_data_event(psc, &psc->event_rings[1]))
4053 		return 1;
4054 
4055 	return 0;
4056 }
4057 
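/*
 * Main interrupt handler. Reads the current execution environment and
 * MHI state from the device. A transition to the RDDM environment
 * indicates a firmware crash; in the PBL and SBL environments MHI state
 * changes are driven from here. When the device does not use multiple
 * MSI vectors this handler also services the event rings, the copy
 * engine pipes, and the DP service rings.
 */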
4058 int
4059 qwx_pci_intr(void *arg)
4060 {
4061 	struct qwx_pci_softc *psc = arg;
4062 	struct qwx_softc *sc = (void *)psc;
4063 	uint32_t ee, state;
4064 	int ret = 0;
4065 
4066 	/*
4067 	 * Interrupts can trigger before mhi_start() during boot if the device
4068 	 * is still active after a warm reboot.
4069 	 */
4070 	if (psc->bhi_off == 0)
4071 		psc->bhi_off = qwx_pci_read(sc, MHI_BHI_OFFSET);
4072 
4073 	ee = qwx_pci_read(sc, psc->bhi_off + MHI_BHI_EXECENV);
4074 	state = qwx_pci_read(sc, MHI_STATUS);
4075 	state = (state & MHI_STATUS_MHISTATE_MASK) >>
4076 	    MHI_STATUS_MHISTATE_SHFT;
4077 
4078 	DNPRINTF(QWX_D_MHI,
4079 	    "%s: BHI interrupt with EE: 0x%x -> 0x%x state: 0x%x -> 0x%x\n",
4080 	     sc->sc_dev.dv_xname, psc->bhi_ee, ee, psc->mhi_state, state);
4081 
4082 	if (ee == MHI_EE_RDDM) {
4083 		/* Firmware crash, e.g. due to invalid DMA memory access. */
4084 		psc->bhi_ee = ee;
4085 #ifdef QWX_DEBUG
4086 		if (!psc->rddm_triggered) {
4087 			/* Write fw memory dump to root's home directory. */
4088 			task_add(systq, &psc->rddm_task);
4089 			psc->rddm_triggered = 1;
4090 		}
4091 #else
4092 		printf("%s: fatal firmware error\n",
4093 		   sc->sc_dev.dv_xname);
4094 		if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags) &&
4095 		    (sc->sc_ic.ic_if.if_flags & (IFF_UP | IFF_RUNNING)) ==
4096 		    (IFF_UP | IFF_RUNNING)) {
4097 			/* Try to reset the device. */
4098 			set_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags);
4099 			task_add(systq, &sc->init_task);
4100 		}
4101 #endif
4102 		return 1;
4103 	} else if (psc->bhi_ee == MHI_EE_PBL || psc->bhi_ee == MHI_EE_SBL) {
4104 		int new_ee = -1, new_mhi_state = -1;
4105 
4106 		if (psc->bhi_ee != ee)
4107 			new_ee = ee;
4108 
4109 		if (psc->mhi_state != state)
4110 			new_mhi_state = state;
4111 
4112 		if (new_ee != -1 || new_mhi_state != -1)
4113 			qwx_mhi_state_change(psc, new_ee, new_mhi_state);
4114 
4115 		ret = 1;
4116 	}
4117 
4118 	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) {
4119 		int i;
4120 
4121 		if (qwx_pci_intr_ctrl_event(psc, &psc->event_rings[0]))
4122 			ret = 1;
4123 		if (qwx_pci_intr_data_event(psc, &psc->event_rings[1]))
4124 			ret = 1;
4125 
4126 		for (i = 0; i < sc->hw_params.ce_count; i++) {
4127 			struct qwx_ce_pipe *ce_pipe = &sc->ce.ce_pipe[i];
4128 
4129 			if (qwx_ce_intr(ce_pipe))
4130 				ret = 1;
4131 		}
4132 
4133 		if (test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, sc->sc_flags)) {
4134 			for (i = 0; i < nitems(sc->ext_irq_grp); i++) {
4135 				if (qwx_dp_service_srng(sc, i))
4136 					ret = 1;
4137 			}
4138 		}
4139 	}
4140 
4141 	return ret;
4142 }
4143