1 /*	$OpenBSD: dwpcie.c,v 1.50 2023/09/21 19:39:41 patrick Exp $	*/
2 /*
3  * Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <sys/param.h>
19 #include <sys/systm.h>
20 #include <sys/device.h>
21 #include <sys/evcount.h>
22 #include <sys/extent.h>
23 #include <sys/malloc.h>
24 
25 #include <machine/intr.h>
26 #include <machine/bus.h>
27 #include <machine/fdt.h>
28 
29 #include <dev/pci/pcidevs.h>
30 #include <dev/pci/pcireg.h>
31 #include <dev/pci/pcivar.h>
32 #include <dev/pci/ppbreg.h>
33 
34 #include <dev/ofw/openfirm.h>
35 #include <dev/ofw/ofw_clock.h>
36 #include <dev/ofw/ofw_gpio.h>
37 #include <dev/ofw/ofw_misc.h>
38 #include <dev/ofw/ofw_pinctrl.h>
39 #include <dev/ofw/ofw_power.h>
40 #include <dev/ofw/ofw_regulator.h>
41 #include <dev/ofw/fdt.h>
42 
43 /* Registers */
44 #define PCIE_PORT_LINK_CTRL		0x710
45 #define  PCIE_PORT_LINK_CTRL_LANES_MASK			(0x3f << 16)
46 #define  PCIE_PORT_LINK_CTRL_LANES_1			(0x1 << 16)
47 #define  PCIE_PORT_LINK_CTRL_LANES_2			(0x3 << 16)
48 #define  PCIE_PORT_LINK_CTRL_LANES_4			(0x7 << 16)
49 #define  PCIE_PORT_LINK_CTRL_LANES_8			(0xf << 16)
50 #define PCIE_PHY_DEBUG_R1		0x72c
51 #define  PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
52 #define  PCIE_PHY_DEBUG_R1_XMLH_LINK_UP			(1 << 4)
53 #define PCIE_LINK_WIDTH_SPEED_CTRL	0x80c
54 #define  PCIE_LINK_WIDTH_SPEED_CTRL_LANES_MASK		(0x1f << 8)
55 #define  PCIE_LINK_WIDTH_SPEED_CTRL_LANES_1		(0x1 << 8)
56 #define  PCIE_LINK_WIDTH_SPEED_CTRL_LANES_2		(0x2 << 8)
57 #define  PCIE_LINK_WIDTH_SPEED_CTRL_LANES_4		(0x4 << 8)
58 #define  PCIE_LINK_WIDTH_SPEED_CTRL_LANES_8		(0x8 << 8)
59 #define  PCIE_LINK_WIDTH_SPEED_CTRL_CHANGE		(1 << 17)
60 
61 #define PCIE_MSI_ADDR_LO	0x820
62 #define PCIE_MSI_ADDR_HI	0x824
63 #define PCIE_MSI_INTR0_ENABLE	0x828
64 #define PCIE_MSI_INTR0_MASK	0x82c
65 #define PCIE_MSI_INTR0_STATUS	0x830
66 
67 #define MISC_CONTROL_1		0x8bc
68 #define  MISC_CONTROL_1_DBI_RO_WR_EN	(1 << 0)
69 #define IATU_VIEWPORT		0x900
70 #define  IATU_VIEWPORT_INDEX0		0
71 #define  IATU_VIEWPORT_INDEX1		1
72 #define  IATU_VIEWPORT_INDEX2		2
73 #define  IATU_VIEWPORT_INDEX3		3
74 #define IATU_OFFSET_VIEWPORT	0x904
75 #define IATU_OFFSET_UNROLL(x)	(0x200 * (x))
76 #define IATU_REGION_CTRL_1	0x000
77 #define  IATU_REGION_CTRL_1_TYPE_MEM	0
78 #define  IATU_REGION_CTRL_1_TYPE_IO	2
79 #define  IATU_REGION_CTRL_1_TYPE_CFG0	4
80 #define  IATU_REGION_CTRL_1_TYPE_CFG1	5
81 #define IATU_REGION_CTRL_2	0x004
82 #define  IATU_REGION_CTRL_2_REGION_EN	(1U << 31)
83 #define IATU_LWR_BASE_ADDR	0x08
84 #define IATU_UPPER_BASE_ADDR	0x0c
85 #define IATU_LIMIT_ADDR		0x10
86 #define IATU_LWR_TARGET_ADDR	0x14
87 #define IATU_UPPER_TARGET_ADDR	0x18
88 
89 /* Marvell ARMADA 8k registers */
90 #define PCIE_GLOBAL_CTRL	0x8000
91 #define  PCIE_GLOBAL_CTRL_APP_LTSSM_EN		(1 << 2)
92 #define  PCIE_GLOBAL_CTRL_DEVICE_TYPE_MASK	(0xf << 4)
93 #define  PCIE_GLOBAL_CTRL_DEVICE_TYPE_RC	(0x4 << 4)
94 #define PCIE_GLOBAL_STATUS	0x8008
95 #define  PCIE_GLOBAL_STATUS_RDLH_LINK_UP	(1 << 1)
96 #define  PCIE_GLOBAL_STATUS_PHY_LINK_UP		(1 << 9)
97 #define PCIE_PM_STATUS		0x8014
98 #define PCIE_GLOBAL_INT_CAUSE	0x801c
99 #define PCIE_GLOBAL_INT_MASK	0x8020
100 #define  PCIE_GLOBAL_INT_MASK_INT_A		(1 << 9)
101 #define  PCIE_GLOBAL_INT_MASK_INT_B		(1 << 10)
102 #define  PCIE_GLOBAL_INT_MASK_INT_C		(1 << 11)
103 #define  PCIE_GLOBAL_INT_MASK_INT_D		(1 << 12)
104 #define PCIE_ARCACHE_TRC	0x8050
105 #define  PCIE_ARCACHE_TRC_DEFAULT		0x3511
106 #define PCIE_AWCACHE_TRC	0x8054
107 #define  PCIE_AWCACHE_TRC_DEFAULT		0x5311
108 #define PCIE_ARUSER		0x805c
109 #define PCIE_AWUSER		0x8060
110 #define  PCIE_AXUSER_DOMAIN_MASK		(0x3 << 4)
111 #define  PCIE_AXUSER_DOMAIN_INNER_SHARABLE	(0x1 << 4)
112 #define  PCIE_AXUSER_DOMAIN_OUTER_SHARABLE	(0x2 << 4)
113 #define PCIE_STREAMID		0x8064
114 #define  PCIE_STREAMID_FUNC_BITS(x)		((x) << 0)
115 #define  PCIE_STREAMID_DEV_BITS(x)		((x) << 4)
116 #define  PCIE_STREAMID_BUS_BITS(x)		((x) << 8)
117 #define  PCIE_STREAMID_ROOTPORT(x)		((x) << 12)
118 #define  PCIE_STREAMID_8040			\
119     (PCIE_STREAMID_ROOTPORT(0x80) | PCIE_STREAMID_BUS_BITS(2) | \
120      PCIE_STREAMID_DEV_BITS(2) | PCIE_STREAMID_FUNC_BITS(3))
121 
122 /* Amlogic G12A registers */
123 #define PCIE_CFG0		0x0000
124 #define  PCIE_CFG0_APP_LTSSM_EN			(1 << 7)
125 #define PCIE_STATUS12		0x0030
126 #define  PCIE_STATUS12_RDLH_LINK_UP		(1 << 16)
127 #define  PCIE_STATUS12_LTSSM_MASK		(0x1f << 10)
128 #define  PCIE_STATUS12_LTSSM_UP			(0x11 << 10)
129 #define  PCIE_STATUS12_SMLH_LINK_UP		(1 << 6)
130 
131 /* NXP i.MX8MQ registers */
132 #define PCIE_RC_LCR				0x7c
133 #define  PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1		0x1
134 #define  PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2		0x2
135 #define  PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK		0xf
136 #define  PCIE_RC_LCR_L1EL_MASK				(0x7 << 15)
137 #define  PCIE_RC_LCR_L1EL_64US				(0x6 << 15)
138 
139 #define IOMUXC_GPR12				0x30
140 #define  IMX8MQ_GPR_PCIE2_DEVICE_TYPE_MASK		(0xf << 8)
141 #define  IMX8MQ_GPR_PCIE2_DEVICE_TYPE_RC		(0x4 << 8)
142 #define  IMX8MQ_GPR_PCIE1_DEVICE_TYPE_MASK		(0xf << 12)
143 #define  IMX8MQ_GPR_PCIE1_DEVICE_TYPE_RC		(0x4 << 12)
144 #define IOMUXC_GPR14				0x38
145 #define IOMUXC_GPR16				0x40
146 #define  IMX8MQ_GPR_PCIE_REF_USE_PAD			(1 << 9)
147 #define  IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN		(1 << 10)
148 #define  IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE		(1 << 11)
149 #define  IMX8MM_GPR_PCIE_SSC_EN				(1 << 16)
150 #define  IMX8MM_GPR_PCIE_POWER_OFF			(1 << 17)
151 #define  IMX8MM_GPR_PCIE_CMN_RST			(1 << 18)
152 #define  IMX8MM_GPR_PCIE_AUX_EN				(1 << 19)
153 #define  IMX8MM_GPR_PCIE_REF_CLK_MASK			(0x3 << 24)
154 #define  IMX8MM_GPR_PCIE_REF_CLK_PLL			(0x3 << 24)
155 #define  IMX8MM_GPR_PCIE_REF_CLK_EXT			(0x2 << 24)
156 
157 #define IMX8MM_PCIE_PHY_CMN_REG62			0x188
158 #define  IMX8MM_PCIE_PHY_CMN_REG62_PLL_CLK_OUT			0x08
159 #define IMX8MM_PCIE_PHY_CMN_REG64			0x190
160 #define  IMX8MM_PCIE_PHY_CMN_REG64_AUX_RX_TX_TERM		0x8c
161 #define IMX8MM_PCIE_PHY_CMN_REG75			0x1d4
162 #define  IMX8MM_PCIE_PHY_CMN_REG75_PLL_DONE			0x3
163 #define IMX8MM_PCIE_PHY_TRSV_REG5			0x414
164 #define  IMX8MM_PCIE_PHY_TRSV_REG5_GEN1_DEEMP			0x2d
165 #define IMX8MM_PCIE_PHY_TRSV_REG6			0x418
166 #define  IMX8MM_PCIE_PHY_TRSV_REG6_GEN2_DEEMP			0xf
167 
168 #define ANATOP_PLLOUT_CTL			0x74
169 #define  ANATOP_PLLOUT_CTL_CKE				(1 << 4)
170 #define  ANATOP_PLLOUT_CTL_SEL_SYSPLL1			0xb
171 #define  ANATOP_PLLOUT_CTL_SEL_MASK			0xf
172 #define ANATOP_PLLOUT_DIV			0x7c
173 #define  ANATOP_PLLOUT_DIV_SYSPLL1			0x7
174 
175 /* Rockchip RK3568/RK3588 registers */
176 #define PCIE_CLIENT_GENERAL_CON			0x0000
177 #define  PCIE_CLIENT_DEV_TYPE_RC		((0xf << 4) << 16 | (0x4 << 4))
178 #define  PCIE_CLIENT_LINK_REQ_RST_GRT		((1 << 3) << 16 | (1 << 3))
179 #define  PCIE_CLIENT_APP_LTSSM_ENABLE		((1 << 2) << 16 | (1 << 2))
180 #define PCIE_CLIENT_INTR_STATUS_LEGACY		0x0008
181 #define PCIE_CLIENT_INTR_MASK_LEGACY		0x001c
182 #define PCIE_CLIENT_HOT_RESET_CTRL		0x0180
183 #define  PCIE_CLIENT_APP_LTSSM_ENABLE_ENHANCE	((1 << 4) << 16 | (1 << 4))
184 #define PCIE_CLIENT_LTSSM_STATUS		0x0300
185 #define  PCIE_CLIENT_RDLH_LINK_UP		(1 << 17)
186 #define  PCIE_CLIENT_SMLH_LINK_UP		(1 << 16)
187 #define  PCIE_CLIENT_LTSSM_MASK			(0x1f << 0)
188 #define  PCIE_CLIENT_LTSSM_UP			(0x11 << 0)
189 
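/* Access helpers for the controller (DBI) register space. */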
190 #define HREAD4(sc, reg)							\
191 	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
192 #define HWRITE4(sc, reg, val)						\
193 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
194 #define HSET4(sc, reg, bits)						\
195 	HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
196 #define HCLR4(sc, reg, bits)						\
197 	HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))
198 
199 struct dwpcie_range {
200 	uint32_t		flags;
201 	uint64_t		pci_base;
202 	uint64_t		phys_base;
203 	uint64_t		size;
204 };
205 
206 struct dwpcie_intx {
207 	int			(*di_func)(void *);
208 	void			*di_arg;
209 	int			di_ipl;
210 	int			di_flags;
211 	int			di_pin;
212 	struct evcount		di_count;
213 	char			*di_name;
214 	struct dwpcie_softc	*di_sc;
215 	TAILQ_ENTRY(dwpcie_intx) di_next;
216 };
217 
218 #define DWPCIE_NUM_MSI		32
219 
220 struct dwpcie_msi {
221 	int			(*dm_func)(void *);
222 	void			*dm_arg;
223 	int			dm_ipl;
224 	int			dm_flags;
225 	int			dm_vec;
226 	struct evcount		dm_count;
227 	char			*dm_name;
228 };
229 
230 struct dwpcie_softc {
231 	struct device		sc_dev;
232 	bus_space_tag_t		sc_iot;
233 	bus_space_handle_t	sc_ioh;
234 	bus_dma_tag_t		sc_dmat;
235 
236 	bus_addr_t		sc_ctrl_base;
237 	bus_size_t		sc_ctrl_size;
238 
239 	bus_addr_t		sc_conf_base;
240 	bus_size_t		sc_conf_size;
241 	bus_space_handle_t	sc_conf_ioh;
242 
243 	bus_addr_t		sc_glue_base;
244 	bus_size_t		sc_glue_size;
245 	bus_space_handle_t	sc_glue_ioh;
246 
247 	bus_addr_t		sc_atu_base;
248 	bus_size_t		sc_atu_size;
249 	bus_space_handle_t	sc_atu_ioh;
250 
251 	bus_addr_t		sc_io_base;
252 	bus_addr_t		sc_io_bus_addr;
253 	bus_size_t		sc_io_size;
254 	bus_addr_t		sc_mem_base;
255 	bus_addr_t		sc_mem_bus_addr;
256 	bus_size_t		sc_mem_size;
257 	bus_addr_t		sc_pmem_base;
258 	bus_addr_t		sc_pmem_bus_addr;
259 	bus_size_t		sc_pmem_size;
260 
261 	int			sc_node;
262 	int			sc_acells;
263 	int			sc_scells;
264 	int			sc_pacells;
265 	int			sc_pscells;
266 	struct dwpcie_range	*sc_ranges;
267 	int			sc_nranges;
268 
269 	struct bus_space	sc_bus_iot;
270 	struct bus_space	sc_bus_memt;
271 
272 	struct machine_pci_chipset sc_pc;
273 	int			sc_bus;
274 
275 	int			sc_num_viewport;
276 	int			sc_atu_unroll;
277 	int			sc_atu_viewport;
278 
279 	void			*sc_ih;
280 	struct interrupt_controller sc_ic;
281 	TAILQ_HEAD(,dwpcie_intx) sc_intx[4];
282 
283 	uint64_t		sc_msi_addr;
284 	struct dwpcie_msi	sc_msi[DWPCIE_NUM_MSI];
285 };
286 
287 struct dwpcie_intr_handle {
288 	struct machine_intr_handle pih_ih;
289 	struct dwpcie_softc	*pih_sc;
290 	struct dwpcie_msi	*pih_dm;
291 	bus_dma_tag_t		pih_dmat;
292 	bus_dmamap_t		pih_map;
293 };
294 
295 int dwpcie_match(struct device *, void *, void *);
296 void dwpcie_attach(struct device *, struct device *, void *);
297 
298 const struct cfattach	dwpcie_ca = {
299 	sizeof (struct dwpcie_softc), dwpcie_match, dwpcie_attach
300 };
301 
302 struct cfdriver dwpcie_cd = {
303 	NULL, "dwpcie", DV_DULL
304 };
305 
306 int
307 dwpcie_match(struct device *parent, void *match, void *aux)
308 {
309 	struct fdt_attach_args *faa = aux;
310 
311 	return (OF_is_compatible(faa->fa_node, "amlogic,g12a-pcie") ||
312 	    OF_is_compatible(faa->fa_node, "baikal,bm1000-pcie") ||
313 	    OF_is_compatible(faa->fa_node, "fsl,imx8mm-pcie") ||
314 	    OF_is_compatible(faa->fa_node, "fsl,imx8mq-pcie") ||
315 	    OF_is_compatible(faa->fa_node, "marvell,armada8k-pcie") ||
316 	    OF_is_compatible(faa->fa_node, "qcom,pcie-sc8280xp") ||
317 	    OF_is_compatible(faa->fa_node, "rockchip,rk3568-pcie") ||
318 	    OF_is_compatible(faa->fa_node, "rockchip,rk3588-pcie") ||
319 	    OF_is_compatible(faa->fa_node, "sifive,fu740-pcie"));
320 }
321 
322 void	dwpcie_attach_deferred(struct device *);
323 
324 void	dwpcie_atu_disable(struct dwpcie_softc *, int);
325 void	dwpcie_atu_config(struct dwpcie_softc *, int, int,
326 	    uint64_t, uint64_t, uint64_t);
327 void	dwpcie_link_config(struct dwpcie_softc *);
328 int	dwpcie_link_up(struct dwpcie_softc *);
329 
330 int	dwpcie_armada8k_init(struct dwpcie_softc *);
331 int	dwpcie_armada8k_link_up(struct dwpcie_softc *);
332 int	dwpcie_armada8k_intr(void *);
333 
334 int	dwpcie_g12a_init(struct dwpcie_softc *);
335 int	dwpcie_g12a_link_up(struct dwpcie_softc *);
336 
337 int	dwpcie_imx8mq_init(struct dwpcie_softc *);
338 int	dwpcie_imx8mq_intr(void *);
339 
340 int	dwpcie_fu740_init(struct dwpcie_softc *);
341 
342 int	dwpcie_rk3568_init(struct dwpcie_softc *);
343 int	dwpcie_rk3568_intr(void *);
344 void	*dwpcie_rk3568_intr_establish(void *, int *, int,
345 	    struct cpu_info *, int (*)(void *), void *, char *);
346 void	dwpcie_rk3568_intr_disestablish(void *);
347 void	dwpcie_rk3568_intr_barrier(void *);
348 
349 int	dwpcie_sc8280xp_init(struct dwpcie_softc *);
350 
351 void	dwpcie_attach_hook(struct device *, struct device *,
352 	    struct pcibus_attach_args *);
353 int	dwpcie_bus_maxdevs(void *, int);
354 pcitag_t dwpcie_make_tag(void *, int, int, int);
355 void	dwpcie_decompose_tag(void *, pcitag_t, int *, int *, int *);
356 int	dwpcie_conf_size(void *, pcitag_t);
357 pcireg_t dwpcie_conf_read(void *, pcitag_t, int);
358 void	dwpcie_conf_write(void *, pcitag_t, int, pcireg_t);
359 int	dwpcie_probe_device_hook(void *, struct pci_attach_args *);
360 
361 int	dwpcie_intr_map(struct pci_attach_args *, pci_intr_handle_t *);
362 const char *dwpcie_intr_string(void *, pci_intr_handle_t);
363 void	*dwpcie_intr_establish(void *, pci_intr_handle_t, int,
364 	    struct cpu_info *, int (*)(void *), void *, char *);
365 void	dwpcie_intr_disestablish(void *, void *);
366 
367 int	dwpcie_bs_iomap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
368 	    bus_space_handle_t *);
369 int	dwpcie_bs_memmap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
370 	    bus_space_handle_t *);
371 
372 struct interrupt_controller dwpcie_ic = {
373 	.ic_barrier = intr_barrier
374 };
375 
376 void
377 dwpcie_attach(struct device *parent, struct device *self, void *aux)
378 {
379 	struct dwpcie_softc *sc = (struct dwpcie_softc *)self;
380 	struct fdt_attach_args *faa = aux;
381 	uint32_t *ranges;
382 	int i, j, nranges, rangeslen;
383 	int atu, config, ctrl, glue;
384 
385 	if (faa->fa_nreg < 2) {
386 		printf(": no registers\n");
387 		return;
388 	}
389 
390 	sc->sc_ctrl_base = faa->fa_reg[0].addr;
391 	sc->sc_ctrl_size = faa->fa_reg[0].size;
392 
393 	ctrl = OF_getindex(faa->fa_node, "dbi", "reg-names");
394 	if (ctrl >= 0 && ctrl < faa->fa_nreg) {
395 		sc->sc_ctrl_base = faa->fa_reg[ctrl].addr;
396 		sc->sc_ctrl_size = faa->fa_reg[ctrl].size;
397 	}
398 
399 	config = OF_getindex(faa->fa_node, "config", "reg-names");
400 	if (config < 0 || config >= faa->fa_nreg) {
401 		printf(": no config registers\n");
402 		return;
403 	}
404 
405 	sc->sc_conf_base = faa->fa_reg[config].addr;
406 	sc->sc_conf_size = faa->fa_reg[config].size;
407 
408 	sc->sc_atu_base = sc->sc_ctrl_base + 0x300000;
409 	sc->sc_atu_size = sc->sc_ctrl_size - 0x300000;
410 
411 	atu = OF_getindex(faa->fa_node, "atu", "reg-names");
412 	if (atu >= 0 && atu < faa->fa_nreg) {
413 		sc->sc_atu_base = faa->fa_reg[atu].addr;
414 		sc->sc_atu_size = faa->fa_reg[atu].size;
415 	}
416 
417 	if (OF_is_compatible(faa->fa_node, "amlogic,g12a-pcie")) {
418 		glue = OF_getindex(faa->fa_node, "cfg", "reg-names");
419 		if (glue < 0 || glue >= faa->fa_nreg) {
420 			printf(": no glue registers\n");
421 			return;
422 		}
423 
424 		sc->sc_glue_base = faa->fa_reg[glue].addr;
425 		sc->sc_glue_size = faa->fa_reg[glue].size;
426 	}
427 
428 	if (OF_is_compatible(faa->fa_node, "rockchip,rk3568-pcie") ||
429 	    OF_is_compatible(faa->fa_node, "rockchip,rk3588-pcie")) {
430 		glue = OF_getindex(faa->fa_node, "apb", "reg-names");
431 		if (glue < 0 || glue >= faa->fa_nreg) {
432 			printf(": no glue registers\n");
433 			return;
434 		}
435 
436 		sc->sc_glue_base = faa->fa_reg[glue].addr;
437 		sc->sc_glue_size = faa->fa_reg[glue].size;
438 	}
439 
440 	sc->sc_iot = faa->fa_iot;
441 	sc->sc_dmat = faa->fa_dmat;
442 	sc->sc_node = faa->fa_node;
443 
444 	sc->sc_acells = OF_getpropint(sc->sc_node, "#address-cells",
445 	    faa->fa_acells);
446 	sc->sc_scells = OF_getpropint(sc->sc_node, "#size-cells",
447 	    faa->fa_scells);
448 	sc->sc_pacells = faa->fa_acells;
449 	sc->sc_pscells = faa->fa_scells;
450 
451 	rangeslen = OF_getproplen(sc->sc_node, "ranges");
452 	if (rangeslen <= 0 || (rangeslen % sizeof(uint32_t)) ||
453 	     (rangeslen / sizeof(uint32_t)) % (sc->sc_acells +
454 	     sc->sc_pacells + sc->sc_scells)) {
455 		printf(": invalid ranges property\n");
456 		return;
457 	}
458 
459 	ranges = malloc(rangeslen, M_TEMP, M_WAITOK);
460 	OF_getpropintarray(sc->sc_node, "ranges", ranges,
461 	    rangeslen);
462 
463 	nranges = (rangeslen / sizeof(uint32_t)) /
464 	    (sc->sc_acells + sc->sc_pacells + sc->sc_scells);
465 	sc->sc_ranges = mallocarray(nranges,
466 	    sizeof(struct dwpcie_range), M_TEMP, M_WAITOK);
467 	sc->sc_nranges = nranges;
468 
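
	/*
	 * Each "ranges" entry is a (PCI flags, PCI address, CPU address,
	 * size) tuple; the flags cell encodes the address space type
	 * (I/O, 32-bit or 64-bit memory) tested further down, and the
	 * address and size fields span one or two cells each, as given
	 * by "#address-cells" and "#size-cells".
	 */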
469 	for (i = 0, j = 0; i < sc->sc_nranges; i++) {
470 		sc->sc_ranges[i].flags = ranges[j++];
471 		sc->sc_ranges[i].pci_base = ranges[j++];
472 		if (sc->sc_acells - 1 == 2) {
473 			sc->sc_ranges[i].pci_base <<= 32;
474 			sc->sc_ranges[i].pci_base |= ranges[j++];
475 		}
476 		sc->sc_ranges[i].phys_base = ranges[j++];
477 		if (sc->sc_pacells == 2) {
478 			sc->sc_ranges[i].phys_base <<= 32;
479 			sc->sc_ranges[i].phys_base |= ranges[j++];
480 		}
481 		sc->sc_ranges[i].size = ranges[j++];
482 		if (sc->sc_scells == 2) {
483 			sc->sc_ranges[i].size <<= 32;
484 			sc->sc_ranges[i].size |= ranges[j++];
485 		}
486 	}
487 
488 	free(ranges, M_TEMP, rangeslen);
489 
490 	if (bus_space_map(sc->sc_iot, sc->sc_ctrl_base,
491 	    sc->sc_ctrl_size, 0, &sc->sc_ioh)) {
492 		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
493 		    sizeof(struct dwpcie_range));
494 		printf(": can't map ctrl registers\n");
495 		return;
496 	}
497 
498 	if (bus_space_map(sc->sc_iot, sc->sc_conf_base,
499 	    sc->sc_conf_size, 0, &sc->sc_conf_ioh)) {
500 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ctrl_size);
501 		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
502 		    sizeof(struct dwpcie_range));
503 		printf(": can't map config registers\n");
504 		return;
505 	}
506 
507 	sc->sc_num_viewport = OF_getpropint(sc->sc_node, "num-viewport", 2);
508 
509 	printf("\n");
510 
511 	pinctrl_byname(sc->sc_node, "default");
512 	clock_set_assigned(sc->sc_node);
513 
514 	config_defer(self, dwpcie_attach_deferred);
515 }
516 
517 void
518 dwpcie_attach_deferred(struct device *self)
519 {
520 	struct dwpcie_softc *sc = (struct dwpcie_softc *)self;
521 	struct pcibus_attach_args pba;
522 	bus_addr_t iobase, iolimit;
523 	bus_addr_t membase, memlimit;
524 	bus_addr_t pmembase, pmemlimit;
525 	uint32_t bus_range[2];
526 	pcireg_t bir, blr, csr;
527 	int i, error = 0;
528 
529 	if (OF_is_compatible(sc->sc_node, "marvell,armada8k-pcie"))
530 		error = dwpcie_armada8k_init(sc);
531 	if (OF_is_compatible(sc->sc_node, "amlogic,g12a-pcie"))
532 		error = dwpcie_g12a_init(sc);
533 	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie") ||
534 	    OF_is_compatible(sc->sc_node, "fsl,imx8mq-pcie"))
535 		error = dwpcie_imx8mq_init(sc);
536 	if (OF_is_compatible(sc->sc_node, "qcom,pcie-sc8280xp"))
537 		error = dwpcie_sc8280xp_init(sc);
538 	if (OF_is_compatible(sc->sc_node, "rockchip,rk3568-pcie") ||
539 	    OF_is_compatible(sc->sc_node, "rockchip,rk3588-pcie"))
540 		error = dwpcie_rk3568_init(sc);
541 	if (OF_is_compatible(sc->sc_node, "sifive,fu740-pcie"))
542 		error = dwpcie_fu740_init(sc);
543 	if (error != 0) {
544 		bus_space_unmap(sc->sc_iot, sc->sc_conf_ioh, sc->sc_conf_size);
545 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ctrl_size);
546 		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
547 		    sizeof(struct dwpcie_range));
548 		printf("%s: can't initialize hardware\n",
549 		    sc->sc_dev.dv_xname);
550 		return;
551 	}
552 
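	/*
	 * Controllers with an "unrolled" iATU have no viewport register
	 * (it reads back as all-ones); each region instead has its own
	 * register block in the separate atu space.
	 */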
553 	sc->sc_atu_viewport = -1;
554 	if (HREAD4(sc, IATU_VIEWPORT) == 0xffffffff) {
555 		sc->sc_atu_unroll = 1;
556 		if (bus_space_map(sc->sc_iot, sc->sc_atu_base,
557 		    sc->sc_atu_size, 0, &sc->sc_atu_ioh)) {
558 			bus_space_unmap(sc->sc_iot, sc->sc_conf_ioh,
559 			    sc->sc_conf_size);
560 			bus_space_unmap(sc->sc_iot, sc->sc_ioh,
561 			    sc->sc_ctrl_size);
562 			free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
563 			    sizeof(struct dwpcie_range));
564 			printf("%s: can't map atu registers\n",
565 			    sc->sc_dev.dv_xname);
566 			return;
567 		}
568 	}
569 
570 	/* Extract the I/O, memory and prefetchable memory windows. */
571 	for (i = 0; i < sc->sc_nranges; i++) {
572 		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
573 		    sc->sc_ranges[i].size > 0) {
574 			sc->sc_io_base = sc->sc_ranges[i].phys_base;
575 			sc->sc_io_bus_addr = sc->sc_ranges[i].pci_base;
576 			sc->sc_io_size = sc->sc_ranges[i].size;
577 		}
578 		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x02000000 &&
579 		    sc->sc_ranges[i].size > 0) {
580 			sc->sc_mem_base = sc->sc_ranges[i].phys_base;
581 			sc->sc_mem_bus_addr = sc->sc_ranges[i].pci_base;
582 			sc->sc_mem_size = sc->sc_ranges[i].size;
583 		}
584 		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x03000000 &&
585 		    sc->sc_ranges[i].size > 0) {
586 			sc->sc_pmem_base = sc->sc_ranges[i].phys_base;
587 			sc->sc_pmem_bus_addr = sc->sc_ranges[i].pci_base;
588 			sc->sc_pmem_size = sc->sc_ranges[i].size;
589 		}
590 	}
591 	if (sc->sc_mem_size == 0) {
592 		printf("%s: no memory mapped I/O window\n",
593 		    sc->sc_dev.dv_xname);
594 		return;
595 	}
596 
597 	/*
598 	 * Disable prefetchable memory mapped I/O window if we don't
599 	 * have enough viewports to enable it.
600 	 */
601 	if (sc->sc_num_viewport < 4)
602 		sc->sc_pmem_size = 0;
603 
604 	for (i = 0; i < sc->sc_num_viewport; i++)
605 		dwpcie_atu_disable(sc, i);
606 
607 	dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX0,
608 	    IATU_REGION_CTRL_1_TYPE_MEM, sc->sc_mem_base,
609 	    sc->sc_mem_bus_addr, sc->sc_mem_size);
610 	if (sc->sc_num_viewport > 2 && sc->sc_io_size > 0)
611 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX2,
612 		    IATU_REGION_CTRL_1_TYPE_IO, sc->sc_io_base,
613 		    sc->sc_io_bus_addr, sc->sc_io_size);
614 	if (sc->sc_num_viewport > 3 && sc->sc_pmem_size > 0)
615 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX3,
616 		    IATU_REGION_CTRL_1_TYPE_MEM, sc->sc_pmem_base,
617 		    sc->sc_pmem_bus_addr, sc->sc_pmem_size);
618 
619 	/* Enable modification of read-only bits. */
620 	HSET4(sc, MISC_CONTROL_1, MISC_CONTROL_1_DBI_RO_WR_EN);
621 
622 	/* A Root Port is a PCI-PCI Bridge. */
623 	HWRITE4(sc, PCI_CLASS_REG,
624 	    PCI_CLASS_BRIDGE << PCI_CLASS_SHIFT |
625 	    PCI_SUBCLASS_BRIDGE_PCI << PCI_SUBCLASS_SHIFT);
626 
627 	/* Clear BAR as U-Boot seems to leave garbage in it. */
628 	HWRITE4(sc, PCI_MAPREG_START, PCI_MAPREG_MEM_TYPE_64BIT);
629 	HWRITE4(sc, PCI_MAPREG_START + 4, 0);
630 
631 	/* Enable 32-bit I/O addressing. */
632 	HSET4(sc, PPB_REG_IOSTATUS,
633 	    PPB_IO_32BIT | (PPB_IO_32BIT << PPB_IOLIMIT_SHIFT));
634 
635 	/* Make sure read-only bits are write-protected. */
636 	HCLR4(sc, MISC_CONTROL_1, MISC_CONTROL_1_DBI_RO_WR_EN);
637 
638 	/* Set up bus range. */
639 	if (OF_getpropintarray(sc->sc_node, "bus-range", bus_range,
640 	    sizeof(bus_range)) != sizeof(bus_range)) {
641 		bus_range[0] = 0;
642 		bus_range[1] = 31;
643 	}
644 	sc->sc_bus = bus_range[0];
645 
646 	/* Initialize bus range. */
647 	bir = bus_range[0];
648 	bir |= ((bus_range[0] + 1) << 8);
649 	bir |= (bus_range[1] << 16);
650 	HWRITE4(sc, PPB_REG_BUSINFO, bir);
651 
652 	/* Initialize memory mapped I/O window. */
653 	membase = sc->sc_mem_bus_addr;
654 	memlimit = membase + sc->sc_mem_size - 1;
655 	blr = memlimit & PPB_MEM_MASK;
656 	blr |= (membase >> PPB_MEM_SHIFT);
657 	HWRITE4(sc, PPB_REG_MEM, blr);
658 
659 	/* Initialize I/O window. */
660 	if (sc->sc_io_size > 0) {
661 		iobase = sc->sc_io_bus_addr;
662 		iolimit = iobase + sc->sc_io_size - 1;
663 		blr = iolimit & PPB_IO_MASK;
664 		blr |= (iobase >> PPB_IO_SHIFT);
665 		HWRITE4(sc, PPB_REG_IOSTATUS, blr);
666 		blr = (iobase & 0xffff0000) >> 16;
667 		blr |= iolimit & 0xffff0000;
668 		HWRITE4(sc, PPB_REG_IO_HI, blr);
669 	} else {
670 		HWRITE4(sc, PPB_REG_IOSTATUS, 0x000000ff);
671 		HWRITE4(sc, PPB_REG_IO_HI, 0x0000ffff);
672 	}
673 
674 	/* Initialize prefetchable memory mapped I/O window. */
675 	if (sc->sc_pmem_size > 0) {
676 		pmembase = sc->sc_pmem_bus_addr;
677 		pmemlimit = pmembase + sc->sc_pmem_size - 1;
678 		blr = pmemlimit & PPB_MEM_MASK;
679 		blr |= (pmembase >> PPB_MEM_SHIFT);
680 		HWRITE4(sc, PPB_REG_PREFMEM, blr);
681 		HWRITE4(sc, PPB_REG_PREFBASE_HI32, pmembase >> 32);
682 		HWRITE4(sc, PPB_REG_PREFLIM_HI32, pmemlimit >> 32);
683 	} else {
684 		HWRITE4(sc, PPB_REG_PREFMEM, 0x0000ffff);
685 		HWRITE4(sc, PPB_REG_PREFBASE_HI32, 0);
686 		HWRITE4(sc, PPB_REG_PREFLIM_HI32, 0);
687 	}
688 
689 	csr = PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE;
690 	if (sc->sc_io_size > 0)
691 		csr |= PCI_COMMAND_IO_ENABLE;
692 	HWRITE4(sc, PCI_COMMAND_STATUS_REG, csr);
693 
694 	memcpy(&sc->sc_bus_iot, sc->sc_iot, sizeof(sc->sc_bus_iot));
695 	sc->sc_bus_iot.bus_private = sc;
696 	sc->sc_bus_iot._space_map = dwpcie_bs_iomap;
697 	memcpy(&sc->sc_bus_memt, sc->sc_iot, sizeof(sc->sc_bus_memt));
698 	sc->sc_bus_memt.bus_private = sc;
699 	sc->sc_bus_memt._space_map = dwpcie_bs_memmap;
700 
701 	sc->sc_pc.pc_conf_v = sc;
702 	sc->sc_pc.pc_attach_hook = dwpcie_attach_hook;
703 	sc->sc_pc.pc_bus_maxdevs = dwpcie_bus_maxdevs;
704 	sc->sc_pc.pc_make_tag = dwpcie_make_tag;
705 	sc->sc_pc.pc_decompose_tag = dwpcie_decompose_tag;
706 	sc->sc_pc.pc_conf_size = dwpcie_conf_size;
707 	sc->sc_pc.pc_conf_read = dwpcie_conf_read;
708 	sc->sc_pc.pc_conf_write = dwpcie_conf_write;
709 	sc->sc_pc.pc_probe_device_hook = dwpcie_probe_device_hook;
710 
711 	sc->sc_pc.pc_intr_v = sc;
712 	sc->sc_pc.pc_intr_map = dwpcie_intr_map;
713 	sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
714 	sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
715 	sc->sc_pc.pc_intr_string = dwpcie_intr_string;
716 	sc->sc_pc.pc_intr_establish = dwpcie_intr_establish;
717 	sc->sc_pc.pc_intr_disestablish = dwpcie_intr_disestablish;
718 
719 	memset(&pba, 0, sizeof(pba));
720 	pba.pba_busname = "pci";
721 	pba.pba_iot = &sc->sc_bus_iot;
722 	pba.pba_memt = &sc->sc_bus_memt;
723 	pba.pba_dmat = sc->sc_dmat;
724 	pba.pba_pc = &sc->sc_pc;
725 	pba.pba_domain = pci_ndomains++;
726 	pba.pba_bus = sc->sc_bus;
727 	if (OF_is_compatible(sc->sc_node, "baikal,bm1000-pcie") ||
728 	    OF_is_compatible(sc->sc_node, "marvell,armada8k-pcie") ||
729 	    OF_getproplen(sc->sc_node, "msi-map") > 0 ||
730 	    sc->sc_msi_addr)
731 		pba.pba_flags |= PCI_FLAGS_MSI_ENABLED;
732 
733 	/* XXX No working MSI on RK3588 yet. */
734 	if (OF_is_compatible(sc->sc_node, "rockchip,rk3588-pcie"))
735 		pba.pba_flags &= ~PCI_FLAGS_MSI_ENABLED;
736 
737 	pci_dopm = 1;
738 
739 	config_found(self, &pba, NULL);
740 }
741 
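/*
 * Program the lane count from the "num-lanes" property into the port
 * link and link width/speed control registers and trigger a link
 * width/speed change.
 */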
742 void
743 dwpcie_link_config(struct dwpcie_softc *sc)
744 {
745 	uint32_t mode, width, reg;
746 	int lanes;
747 
748 	lanes = OF_getpropint(sc->sc_node, "num-lanes", 0);
749 
750 	switch (lanes) {
751 	case 1:
752 		mode = PCIE_PORT_LINK_CTRL_LANES_1;
753 		width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_1;
754 		break;
755 	case 2:
756 		mode = PCIE_PORT_LINK_CTRL_LANES_2;
757 		width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_2;
758 		break;
759 	case 4:
760 		mode = PCIE_PORT_LINK_CTRL_LANES_4;
761 		width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_4;
762 		break;
763 	case 8:
764 		mode = PCIE_PORT_LINK_CTRL_LANES_8;
765 		width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_8;
766 		break;
767 	default:
768 		printf("%s: %d lanes not supported\n", __func__, lanes);
769 		return;
770 	}
771 
772 	reg = HREAD4(sc, PCIE_PORT_LINK_CTRL);
773 	reg &= ~PCIE_PORT_LINK_CTRL_LANES_MASK;
774 	reg |= mode;
775 	HWRITE4(sc, PCIE_PORT_LINK_CTRL, reg);
776 
777 	reg = HREAD4(sc, PCIE_LINK_WIDTH_SPEED_CTRL);
778 	reg &= ~PCIE_LINK_WIDTH_SPEED_CTRL_LANES_MASK;
779 	reg |= width;
780 	HWRITE4(sc, PCIE_LINK_WIDTH_SPEED_CTRL, reg);
781 
782 	reg = HREAD4(sc, PCIE_LINK_WIDTH_SPEED_CTRL);
783 	reg |= PCIE_LINK_WIDTH_SPEED_CTRL_CHANGE;
784 	HWRITE4(sc, PCIE_LINK_WIDTH_SPEED_CTRL, reg);
785 }
786 
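/*
 * Controller-internal MSI dispatch: acknowledge all pending vectors at
 * once, then run the handler registered for each set bit.
 */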
787 int
788 dwpcie_msi_intr(void *arg)
789 {
790 	struct dwpcie_softc *sc = arg;
791 	struct dwpcie_msi *dm;
792 	uint32_t status;
793 	int vec, s;
794 
795 	status = HREAD4(sc, PCIE_MSI_INTR0_STATUS);
796 	if (status == 0)
797 		return 0;
798 
799 	HWRITE4(sc, PCIE_MSI_INTR0_STATUS, status);
800 	while (status) {
801 		vec = ffs(status) - 1;
802 		status &= ~(1U << vec);
803 
804 		dm = &sc->sc_msi[vec];
805 		if (dm->dm_func == NULL)
806 			continue;
807 
808 		if ((dm->dm_flags & IPL_MPSAFE) == 0)
809 			KERNEL_LOCK();
810 		s = splraise(dm->dm_ipl);
811 		if (dm->dm_func(dm->dm_arg))
812 			dm->dm_count.ec_count++;
813 		splx(s);
814 		if ((dm->dm_flags & IPL_MPSAFE) == 0)
815 			KERNEL_UNLOCK();
816 	}
817 
818 	return 1;
819 }
820 
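/*
 * Set up the controller-internal MSI target address.  A dummy DMA-safe
 * word acts as the doorbell; writes to it are matched by the controller
 * and surface through the interrupt established below.
 */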
821 int
822 dwpcie_msi_init(struct dwpcie_softc *sc)
823 {
824 	bus_dma_segment_t seg;
825 	bus_dmamap_t map;
826 	uint64_t addr;
827 	int error, rseg;
828 
829 	/*
830 	 * Allocate some DMA memory such that we have a "safe" target
831 	 * address for MSIs.
832 	 */
833 	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(uint32_t),
834 	    sizeof(uint32_t), 0, &seg, 1, &rseg, BUS_DMA_WAITOK);
835 	if (error)
836 		return error;
837 
838 	/*
839 	 * Translate the CPU address into a bus address that we can
840 	 * program into the hardware.
841 	 */
842 	error = bus_dmamap_create(sc->sc_dmat, sizeof(uint32_t), 1,
843 	    sizeof(uint32_t), 0, BUS_DMA_WAITOK, &map);
844 	if (error) {
845 		bus_dmamem_free(sc->sc_dmat, &seg, 1);
846 		return error;
847 	}
848 	error = bus_dmamap_load_raw(sc->sc_dmat, map, &seg, 1,
849 	    sizeof(uint32_t), BUS_DMA_WAITOK);
850 	if (error) {
851 		bus_dmamap_destroy(sc->sc_dmat, map);
852 		bus_dmamem_free(sc->sc_dmat, &seg, 1);
853 		return error;
854 	}
855 
856 	addr = map->dm_segs[0].ds_addr;
857 	HWRITE4(sc, PCIE_MSI_ADDR_LO, addr);
858 	HWRITE4(sc, PCIE_MSI_ADDR_HI, addr >> 32);
859 
860 	bus_dmamap_unload(sc->sc_dmat, map);
861 	bus_dmamap_destroy(sc->sc_dmat, map);
862 
863 	/* Enable, mask and clear all MSIs. */
864 	HWRITE4(sc, PCIE_MSI_INTR0_ENABLE, 0xffffffff);
865 	HWRITE4(sc, PCIE_MSI_INTR0_MASK, 0xffffffff);
866 	HWRITE4(sc, PCIE_MSI_INTR0_STATUS, 0xffffffff);
867 
868 	KASSERT(sc->sc_ih == NULL);
869 	sc->sc_ih = fdt_intr_establish(sc->sc_node, IPL_BIO | IPL_MPSAFE,
870 	    dwpcie_msi_intr, sc, sc->sc_dev.dv_xname);
871 	if (sc->sc_ih == NULL) {
872 		bus_dmamem_free(sc->sc_dmat, &seg, 1);
873 		return EINVAL;
874 	}
875 
876 	/*
877 	 * Hold on to the DMA memory such that nobody can use it to
878 	 * actually do DMA transfers.
879 	 */
880 
881 	sc->sc_msi_addr = addr;
882 	return 0;
883 }
884 
885 int
886 dwpcie_armada8k_init(struct dwpcie_softc *sc)
887 {
888 	uint32_t reg;
889 	int timo;
890 
891 	clock_enable_all(sc->sc_node);
892 
893 	dwpcie_link_config(sc);
894 
895 	if (!dwpcie_armada8k_link_up(sc)) {
896 		reg = HREAD4(sc, PCIE_GLOBAL_CTRL);
897 		reg &= ~PCIE_GLOBAL_CTRL_APP_LTSSM_EN;
898 		HWRITE4(sc, PCIE_GLOBAL_CTRL, reg);
899 	}
900 
901 	/*
902 	 * Set up the Requester-ID to Stream-ID mapping.
903 	 * XXX: TF-A is supposed to set this up, but doesn't!
904 	 */
905 	HWRITE4(sc, PCIE_STREAMID, PCIE_STREAMID_8040);
906 
907 	/* Enable Root Complex mode. */
908 	reg = HREAD4(sc, PCIE_GLOBAL_CTRL);
909 	reg &= ~PCIE_GLOBAL_CTRL_DEVICE_TYPE_MASK;
910 	reg |= PCIE_GLOBAL_CTRL_DEVICE_TYPE_RC;
911 	HWRITE4(sc, PCIE_GLOBAL_CTRL, reg);
912 
913 	HWRITE4(sc, PCIE_ARCACHE_TRC, PCIE_ARCACHE_TRC_DEFAULT);
914 	HWRITE4(sc, PCIE_AWCACHE_TRC, PCIE_AWCACHE_TRC_DEFAULT);
915 	reg = HREAD4(sc, PCIE_ARUSER);
916 	reg &= ~PCIE_AXUSER_DOMAIN_MASK;
917 	reg |= PCIE_AXUSER_DOMAIN_OUTER_SHARABLE;
918 	HWRITE4(sc, PCIE_ARUSER, reg);
919 	reg = HREAD4(sc, PCIE_AWUSER);
920 	reg &= ~PCIE_AXUSER_DOMAIN_MASK;
921 	reg |= PCIE_AXUSER_DOMAIN_OUTER_SHARABLE;
922 	HWRITE4(sc, PCIE_AWUSER, reg);
923 
924 	if (!dwpcie_armada8k_link_up(sc)) {
925 		reg = HREAD4(sc, PCIE_GLOBAL_CTRL);
926 		reg |= PCIE_GLOBAL_CTRL_APP_LTSSM_EN;
927 		HWRITE4(sc, PCIE_GLOBAL_CTRL, reg);
928 	}
929 
930 	for (timo = 40; timo > 0; timo--) {
931 		if (dwpcie_armada8k_link_up(sc))
932 			break;
933 		delay(1000);
934 	}
935 	if (timo == 0)
936 		return ETIMEDOUT;
937 
938 	sc->sc_ih = fdt_intr_establish(sc->sc_node, IPL_AUDIO | IPL_MPSAFE,
939 	    dwpcie_armada8k_intr, sc, sc->sc_dev.dv_xname);
940 
941 	/* Unmask INTx interrupts. */
942 	HWRITE4(sc, PCIE_GLOBAL_INT_MASK,
943 	    PCIE_GLOBAL_INT_MASK_INT_A | PCIE_GLOBAL_INT_MASK_INT_B |
944 	    PCIE_GLOBAL_INT_MASK_INT_C | PCIE_GLOBAL_INT_MASK_INT_D);
945 
946 	return 0;
947 }
948 
949 int
950 dwpcie_armada8k_link_up(struct dwpcie_softc *sc)
951 {
952 	uint32_t reg, mask;
953 
954 	mask = PCIE_GLOBAL_STATUS_RDLH_LINK_UP;
955 	mask |= PCIE_GLOBAL_STATUS_PHY_LINK_UP;
956 	reg = HREAD4(sc, PCIE_GLOBAL_STATUS);
957 	return ((reg & mask) == mask);
958 }
959 
960 int
961 dwpcie_armada8k_intr(void *arg)
962 {
963 	struct dwpcie_softc *sc = arg;
964 	uint32_t cause;
965 
966 	/* Acknowledge interrupts. */
967 	cause = HREAD4(sc, PCIE_GLOBAL_INT_CAUSE);
968 	HWRITE4(sc, PCIE_GLOBAL_INT_CAUSE, cause);
969 
970 	/* INTx interrupt, so not really ours. */
971 	return 0;
972 }
973 
974 int
975 dwpcie_g12a_init(struct dwpcie_softc *sc)
976 {
977 	uint32_t *reset_gpio;
978 	ssize_t reset_gpiolen;
979 	uint32_t reg;
980 	int error, timo;
981 
982 	reset_gpiolen = OF_getproplen(sc->sc_node, "reset-gpios");
983 	if (reset_gpiolen <= 0)
984 		return ENXIO;
985 
986 	if (bus_space_map(sc->sc_iot, sc->sc_glue_base,
987 	    sc->sc_glue_size, 0, &sc->sc_glue_ioh))
988 		return ENOMEM;
989 
990 	power_domain_enable(sc->sc_node);
991 
992 	phy_enable(sc->sc_node, "pcie");
993 
994 	reset_assert_all(sc->sc_node);
995 	delay(500);
996 	reset_deassert_all(sc->sc_node);
997 	delay(500);
998 
999 	clock_set_frequency(sc->sc_node, "port", 100000000UL);
1000 	clock_enable_all(sc->sc_node);
1001 
1002 	reset_gpio = malloc(reset_gpiolen, M_TEMP, M_WAITOK);
1003 	OF_getpropintarray(sc->sc_node, "reset-gpios", reset_gpio,
1004 	    reset_gpiolen);
1005 	gpio_controller_config_pin(reset_gpio, GPIO_CONFIG_OUTPUT);
1006 	gpio_controller_set_pin(reset_gpio, 1);
1007 
1008 	dwpcie_link_config(sc);
1009 
1010 	reg = bus_space_read_4(sc->sc_iot, sc->sc_glue_ioh, PCIE_CFG0);
1011 	reg |= PCIE_CFG0_APP_LTSSM_EN;
1012 	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh, PCIE_CFG0, reg);
1013 
1014 	gpio_controller_set_pin(reset_gpio, 1);
1015 	delay(500);
1016 	gpio_controller_set_pin(reset_gpio, 0);
1017 
1018 	free(reset_gpio, M_TEMP, reset_gpiolen);
1019 
1020 	for (timo = 40; timo > 0; timo--) {
1021 		if (dwpcie_g12a_link_up(sc))
1022 			break;
1023 		delay(1000);
1024 	}
1025 	if (timo == 0)
1026 		return ETIMEDOUT;
1027 
1028 	error = dwpcie_msi_init(sc);
1029 	if (error)
1030 		return error;
1031 
1032 	return 0;
1033 }
1034 
1035 int
1036 dwpcie_g12a_link_up(struct dwpcie_softc *sc)
1037 {
1038 	uint32_t reg;
1039 
1040 	reg = bus_space_read_4(sc->sc_iot, sc->sc_glue_ioh, PCIE_STATUS12);
1041 	if ((reg & PCIE_STATUS12_SMLH_LINK_UP) &&
1042 	    (reg & PCIE_STATUS12_RDLH_LINK_UP) &&
1043 	    (reg & PCIE_STATUS12_LTSSM_MASK) == PCIE_STATUS12_LTSSM_UP)
1044 		return 1;
1045 	return 0;
1046 }
1047 
1048 int
1049 dwpcie_imx8mq_init(struct dwpcie_softc *sc)
1050 {
1051 	uint32_t *clkreq_gpio, *disable_gpio, *reset_gpio;
1052 	ssize_t clkreq_gpiolen, disable_gpiolen, reset_gpiolen;
1053 	struct regmap *anatop, *gpr, *phy;
1054 	uint32_t off, reg;
1055 	int error, timo;
1056 
1057 	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie")) {
1058 		anatop = regmap_bycompatible("fsl,imx8mm-anatop");
1059 		gpr = regmap_bycompatible("fsl,imx8mm-iomuxc-gpr");
1060 		phy = regmap_bycompatible("fsl,imx7d-pcie-phy");
1061 		KASSERT(phy != NULL);
1062 	} else {
1063 		anatop = regmap_bycompatible("fsl,imx8mq-anatop");
1064 		gpr = regmap_bycompatible("fsl,imx8mq-iomuxc-gpr");
1065 	}
1066 	KASSERT(anatop != NULL);
1067 	KASSERT(gpr != NULL);
1068 
1069 	clkreq_gpiolen = OF_getproplen(sc->sc_node, "clkreq-gpio");
1070 	disable_gpiolen = OF_getproplen(sc->sc_node, "disable-gpio");
1071 	reset_gpiolen = OF_getproplen(sc->sc_node, "reset-gpio");
1072 
1073 	if (clkreq_gpiolen > 0) {
1074 		clkreq_gpio = malloc(clkreq_gpiolen, M_TEMP, M_WAITOK);
1075 		OF_getpropintarray(sc->sc_node, "clkreq-gpio", clkreq_gpio,
1076 		    clkreq_gpiolen);
1077 		gpio_controller_config_pin(clkreq_gpio, GPIO_CONFIG_OUTPUT);
1078 		gpio_controller_set_pin(clkreq_gpio, 1);
1079 	}
1080 
1081 	if (disable_gpiolen > 0) {
1082 		disable_gpio = malloc(disable_gpiolen, M_TEMP, M_WAITOK);
1083 		OF_getpropintarray(sc->sc_node, "disable-gpio", disable_gpio,
1084 		    disable_gpiolen);
1085 		gpio_controller_config_pin(disable_gpio, GPIO_CONFIG_OUTPUT);
1086 		gpio_controller_set_pin(disable_gpio, 0);
1087 	}
1088 
1089 	if (reset_gpiolen > 0) {
1090 		reset_gpio = malloc(reset_gpiolen, M_TEMP, M_WAITOK);
1091 		OF_getpropintarray(sc->sc_node, "reset-gpio", reset_gpio,
1092 		    reset_gpiolen);
1093 		gpio_controller_config_pin(reset_gpio, GPIO_CONFIG_OUTPUT);
1094 		gpio_controller_set_pin(reset_gpio, 1);
1095 	}
1096 
1097 	power_domain_enable(sc->sc_node);
1098 	reset_assert(sc->sc_node, "pciephy");
1099 	reset_assert(sc->sc_node, "apps");
1100 
1101 	reg = regmap_read_4(gpr, IOMUXC_GPR12);
1102 	if (OF_getpropint(sc->sc_node, "ctrl-id", 0) == 0) {
1103 		off = IOMUXC_GPR14;
1104 		reg &= ~IMX8MQ_GPR_PCIE1_DEVICE_TYPE_MASK;
1105 		reg |= IMX8MQ_GPR_PCIE1_DEVICE_TYPE_RC;
1106 	} else {
1107 		off = IOMUXC_GPR16;
1108 		reg &= ~IMX8MQ_GPR_PCIE2_DEVICE_TYPE_MASK;
1109 		reg |= IMX8MQ_GPR_PCIE2_DEVICE_TYPE_RC;
1110 	}
1111 	regmap_write_4(gpr, IOMUXC_GPR12, reg);
1112 
1113 	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie")) {
1114 		if (OF_getproplen(sc->sc_node, "ext_osc") == 0 ||
1115 		    OF_getpropint(sc->sc_node, "ext_osc", 0)) {
1116 			reg = regmap_read_4(gpr, off);
1117 			reg &= ~(IMX8MQ_GPR_PCIE_REF_USE_PAD |
1118 			    IMX8MM_GPR_PCIE_SSC_EN |
1119 			    IMX8MM_GPR_PCIE_POWER_OFF |
1120 			    IMX8MM_GPR_PCIE_REF_CLK_MASK);
1121 			reg |= (IMX8MM_GPR_PCIE_AUX_EN |
1122 			    IMX8MM_GPR_PCIE_REF_CLK_EXT);
1123 			regmap_write_4(gpr, off, reg);
1124 			delay(100);
1125 			reg = regmap_read_4(gpr, off);
1126 			reg |= IMX8MM_GPR_PCIE_CMN_RST;
1127 			regmap_write_4(gpr, off, reg);
1128 			delay(200);
1129 		} else {
1130 			reg = regmap_read_4(gpr, off);
1131 			reg &= ~(IMX8MQ_GPR_PCIE_REF_USE_PAD |
1132 			    IMX8MM_GPR_PCIE_SSC_EN |
1133 			    IMX8MM_GPR_PCIE_POWER_OFF |
1134 			    IMX8MM_GPR_PCIE_REF_CLK_MASK);
1135 			reg |= (IMX8MM_GPR_PCIE_AUX_EN |
1136 			    IMX8MM_GPR_PCIE_REF_CLK_PLL);
1137 			regmap_write_4(gpr, off, reg);
1138 			delay(100);
1139 			regmap_write_4(phy, IMX8MM_PCIE_PHY_CMN_REG62,
1140 			    IMX8MM_PCIE_PHY_CMN_REG62_PLL_CLK_OUT);
1141 			regmap_write_4(phy, IMX8MM_PCIE_PHY_CMN_REG64,
1142 			    IMX8MM_PCIE_PHY_CMN_REG64_AUX_RX_TX_TERM);
1143 			reg = regmap_read_4(gpr, off);
1144 			reg |= IMX8MM_GPR_PCIE_CMN_RST;
1145 			regmap_write_4(gpr, off, reg);
1146 			delay(200);
1147 			regmap_write_4(phy, IMX8MM_PCIE_PHY_TRSV_REG5,
1148 			    IMX8MM_PCIE_PHY_TRSV_REG5_GEN1_DEEMP);
1149 			regmap_write_4(phy, IMX8MM_PCIE_PHY_TRSV_REG6,
1150 			    IMX8MM_PCIE_PHY_TRSV_REG6_GEN2_DEEMP);
1151 		}
1152 	} else {
1153 		if (OF_getproplen(sc->sc_node, "ext_osc") == 0 ||
1154 		    OF_getpropint(sc->sc_node, "ext_osc", 0)) {
1155 			reg = regmap_read_4(gpr, off);
1156 			reg |= IMX8MQ_GPR_PCIE_REF_USE_PAD;
1157 			regmap_write_4(gpr, off, reg);
1158 		} else {
1159 			reg = regmap_read_4(gpr, off);
1160 			reg &= ~IMX8MQ_GPR_PCIE_REF_USE_PAD;
1161 			regmap_write_4(gpr, off, reg);
1162 
1163 			regmap_write_4(anatop, ANATOP_PLLOUT_CTL,
1164 			    ANATOP_PLLOUT_CTL_CKE |
1165 			    ANATOP_PLLOUT_CTL_SEL_SYSPLL1);
1166 			regmap_write_4(anatop, ANATOP_PLLOUT_DIV,
1167 			    ANATOP_PLLOUT_DIV_SYSPLL1);
1168 		}
1169 	}
1170 
1171 	clock_enable(sc->sc_node, "pcie_phy");
1172 	clock_enable(sc->sc_node, "pcie_bus");
1173 	clock_enable(sc->sc_node, "pcie");
1174 	clock_enable(sc->sc_node, "pcie_aux");
1175 
1176 	/* Allow clocks to stabilize. */
1177 	delay(200);
1178 
1179 	if (reset_gpiolen > 0) {
1180 		gpio_controller_set_pin(reset_gpio, 1);
1181 		delay(100000);
1182 		gpio_controller_set_pin(reset_gpio, 0);
1183 	}
1184 
1185 	reset_deassert(sc->sc_node, "pciephy");
1186 
1187 	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie")) {
1188 		for (timo = 2000; timo > 0; timo--) {
1189 			if (regmap_read_4(phy, IMX8MM_PCIE_PHY_CMN_REG75) ==
1190 			    IMX8MM_PCIE_PHY_CMN_REG75_PLL_DONE)
1191 				break;
1192 			delay(10);
1193 		}
1194 		if (timo == 0) {
1195 			error = ETIMEDOUT;
1196 			goto err;
1197 		}
1198 	}
1199 
1200 	reg = HREAD4(sc, 0x100000 + PCIE_RC_LCR);
1201 	reg &= ~PCIE_RC_LCR_L1EL_MASK;
1202 	reg |= PCIE_RC_LCR_L1EL_64US;
1203 	HWRITE4(sc, 0x100000 + PCIE_RC_LCR, reg);
1204 
1205 	dwpcie_link_config(sc);
1206 
1207 	reg = HREAD4(sc, PCIE_RC_LCR);
1208 	reg &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
1209 	reg |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
1210 	HWRITE4(sc, PCIE_RC_LCR, reg);
1211 
1212 	reset_deassert(sc->sc_node, "apps");
1213 
1214 	for (timo = 20000; timo > 0; timo--) {
1215 		if (dwpcie_link_up(sc))
1216 			break;
1217 		delay(10);
1218 	}
1219 	if (timo == 0) {
1220 		error = ETIMEDOUT;
1221 		goto err;
1222 	}
1223 
1224 	if (OF_getpropint(sc->sc_node, "fsl,max-link-speed", 1) >= 2) {
1225 		reg = HREAD4(sc, PCIE_RC_LCR);
1226 		reg &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
1227 		reg |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
1228 		HWRITE4(sc, PCIE_RC_LCR, reg);
1229 
1230 		reg = HREAD4(sc, PCIE_LINK_WIDTH_SPEED_CTRL);
1231 		reg |= PCIE_LINK_WIDTH_SPEED_CTRL_CHANGE;
1232 		HWRITE4(sc, PCIE_LINK_WIDTH_SPEED_CTRL, reg);
1233 
1234 		for (timo = 20000; timo > 0; timo--) {
1235 			if (dwpcie_link_up(sc))
1236 				break;
1237 			delay(10);
1238 		}
1239 		if (timo == 0) {
1240 			error = ETIMEDOUT;
1241 			goto err;
1242 		}
1243 	}
1244 
1245 	sc->sc_ih = fdt_intr_establish(sc->sc_node, IPL_AUDIO | IPL_MPSAFE,
1246 	    dwpcie_imx8mq_intr, sc, sc->sc_dev.dv_xname);
1247 
1248 	/* Unmask INTx interrupts. */
1249 	HWRITE4(sc, PCIE_GLOBAL_INT_MASK,
1250 	    PCIE_GLOBAL_INT_MASK_INT_A | PCIE_GLOBAL_INT_MASK_INT_B |
1251 	    PCIE_GLOBAL_INT_MASK_INT_C | PCIE_GLOBAL_INT_MASK_INT_D);
1252 
1253 	error = 0;
1254 err:
1255 	if (clkreq_gpiolen > 0)
1256 		free(clkreq_gpio, M_TEMP, clkreq_gpiolen);
1257 	if (disable_gpiolen > 0)
1258 		free(disable_gpio, M_TEMP, disable_gpiolen);
1259 	if (reset_gpiolen > 0)
1260 		free(reset_gpio, M_TEMP, reset_gpiolen);
1261 	return error;
1262 }
1263 
1264 int
1265 dwpcie_imx8mq_intr(void *arg)
1266 {
1267 	struct dwpcie_softc *sc = arg;
1268 	uint32_t cause;
1269 
1270 	/* Acknowledge interrupts. */
1271 	cause = HREAD4(sc, PCIE_GLOBAL_INT_CAUSE);
1272 	HWRITE4(sc, PCIE_GLOBAL_INT_CAUSE, cause);
1273 
1274 	/* INTx interrupt, so not really ours. */
1275 	return 0;
1276 }
1277 
1278 int
1279 dwpcie_fu740_init(struct dwpcie_softc *sc)
1280 {
1281 	sc->sc_num_viewport = 8;
1282 
1283 	return 0;
1284 }
1285 
1286 int
1287 dwpcie_rk3568_link_up(struct dwpcie_softc *sc)
1288 {
1289 	uint32_t reg;
1290 
1291 	reg = bus_space_read_4(sc->sc_iot, sc->sc_glue_ioh,
1292 	    PCIE_CLIENT_LTSSM_STATUS);
1293 	if ((reg & PCIE_CLIENT_SMLH_LINK_UP) &&
1294 	    (reg & PCIE_CLIENT_RDLH_LINK_UP) &&
1295 	    (reg & PCIE_CLIENT_LTSSM_MASK) == PCIE_CLIENT_LTSSM_UP)
1296 		return 1;
1297 	return 0;
1298 }
1299 
1300 int
1301 dwpcie_rk3568_init(struct dwpcie_softc *sc)
1302 {
1303 	uint32_t *reset_gpio;
1304 	ssize_t reset_gpiolen;
1305 	int error, idx, node;
1306 	int pin, timo;
1307 
1308 	sc->sc_num_viewport = 8;
1309 
1310 	if (bus_space_map(sc->sc_iot, sc->sc_glue_base,
1311 	    sc->sc_glue_size, 0, &sc->sc_glue_ioh))
1312 		return ENOMEM;
1313 
1314 	reset_assert_all(sc->sc_node);
1315 	/* Power must be enabled before initializing the PHY. */
1316 	regulator_enable(OF_getpropint(sc->sc_node, "vpcie3v3-supply", 0));
1317 	phy_enable(sc->sc_node, "pcie-phy");
1318 	reset_deassert_all(sc->sc_node);
1319 
1320 	clock_enable_all(sc->sc_node);
1321 
1322 	if (dwpcie_rk3568_link_up(sc))
1323 		return 0;
1324 
1325 	reset_gpiolen = OF_getproplen(sc->sc_node, "reset-gpios");
1326 	if (reset_gpiolen > 0) {
1327 		reset_gpio = malloc(reset_gpiolen, M_TEMP, M_WAITOK);
1328 		OF_getpropintarray(sc->sc_node, "reset-gpios", reset_gpio,
1329 		    reset_gpiolen);
1330 		gpio_controller_config_pin(reset_gpio, GPIO_CONFIG_OUTPUT);
1331 		gpio_controller_set_pin(reset_gpio, 1);
1332 	}
1333 
1334 	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
1335 	    PCIE_CLIENT_HOT_RESET_CTRL, PCIE_CLIENT_APP_LTSSM_ENABLE_ENHANCE);
1336 	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
1337 	    PCIE_CLIENT_GENERAL_CON, PCIE_CLIENT_DEV_TYPE_RC);
1338 
1339 	/* Assert PERST#. */
1340 	if (reset_gpiolen > 0)
1341 		gpio_controller_set_pin(reset_gpio, 0);
1342 
1343 	dwpcie_link_config(sc);
1344 
1345 	/* Enable LTSSM. */
1346 	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh, PCIE_CLIENT_GENERAL_CON,
1347 	    PCIE_CLIENT_LINK_REQ_RST_GRT | PCIE_CLIENT_APP_LTSSM_ENABLE);
1348 
1349 	/*
1350 	 * PERST# must remain asserted for at least 100us after the
1351 	 * reference clock becomes stable.  It also has to remain asserted
1352 	 * for at least 100ms after power up.  Since we may have
1353 	 * just powered on the device, play it safe and use 100ms.
1354 	 */
1355 	delay(100000);
1356 
1357 	/* Deassert PERST#. */
1358 	if (reset_gpiolen > 0)
1359 		gpio_controller_set_pin(reset_gpio, 1);
1360 
1361 	/* Wait for the link to come up. */
1362 	for (timo = 100; timo > 0; timo--) {
1363 		if (dwpcie_rk3568_link_up(sc))
1364 			break;
1365 		delay(10000);
1366 	}
1367 	if (timo == 0) {
1368 		error = ETIMEDOUT;
1369 		goto err;
1370 	}
1371 
1372 	node = OF_getnodebyname(sc->sc_node, "legacy-interrupt-controller");
1373 	idx = OF_getindex(sc->sc_node, "legacy", "interrupt-names");
1374 	if (node && idx != -1) {
1375 		sc->sc_ih = fdt_intr_establish_idx(sc->sc_node, idx,
1376 		    IPL_BIO | IPL_MPSAFE, dwpcie_rk3568_intr, sc,
1377 		    sc->sc_dev.dv_xname);
1378 	}
1379 
1380 	if (sc->sc_ih) {
1381 		for (pin = 0; pin < nitems(sc->sc_intx); pin++)
1382 			TAILQ_INIT(&sc->sc_intx[pin]);
1383 		sc->sc_ic.ic_node = node;
1384 		sc->sc_ic.ic_cookie = sc;
1385 		sc->sc_ic.ic_establish = dwpcie_rk3568_intr_establish;
1386 		sc->sc_ic.ic_disestablish = dwpcie_rk3568_intr_disestablish;
1387 		sc->sc_ic.ic_barrier = dwpcie_rk3568_intr_barrier;
1388 		fdt_intr_register(&sc->sc_ic);
1389 	}
1390 
1391 	error = 0;
1392 err:
1393 	if (reset_gpiolen > 0)
1394 		free(reset_gpio, M_TEMP, reset_gpiolen);
1395 
1396 	return error;
1397 }
1398 
1399 int
1400 dwpcie_rk3568_intr(void *arg)
1401 {
1402 	struct dwpcie_softc *sc = arg;
1403 	struct dwpcie_intx *di;
1404 	uint32_t status;
1405 	int pin, s;
1406 
1407 	status = bus_space_read_4(sc->sc_iot, sc->sc_glue_ioh,
1408 	    PCIE_CLIENT_INTR_STATUS_LEGACY);
1409 	for (pin = 0; pin < nitems(sc->sc_intx); pin++) {
1410 		if ((status & (1 << pin)) == 0)
1411 			continue;
1412 
1413 		TAILQ_FOREACH(di, &sc->sc_intx[pin], di_next) {
1414 			if ((di->di_flags & IPL_MPSAFE) == 0)
1415 				KERNEL_LOCK();
1416 			s = splraise(di->di_ipl);
1417 			if (di->di_func(di->di_arg))
1418 				di->di_count.ec_count++;
1419 			splx(s);
1420 			if ((di->di_flags & IPL_MPSAFE) == 0)
1421 				KERNEL_UNLOCK();
1422 		}
1423 	}
1424 
1425 	return 1;
1426 }
1427 
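/*
 * INTx interrupts are multiplexed behind the "legacy" glue interrupt;
 * handlers are chained per pin and masked/unmasked through
 * PCIE_CLIENT_INTR_MASK_LEGACY, which takes a write-enable mask in its
 * upper 16 bits.
 */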
1428 void *
1429 dwpcie_rk3568_intr_establish(void *cookie, int *cell, int level,
1430     struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
1431 {
1432 	struct dwpcie_softc *sc = (struct dwpcie_softc *)cookie;
1433 	struct dwpcie_intx *di;
1434 	int pin = cell[0];
1435 	uint32_t mask = (1U << pin);
1436 
1437 	if (ci != NULL && !CPU_IS_PRIMARY(ci))
1438 		return NULL;
1439 
1440 	if (pin < 0 || pin >= nitems(sc->sc_intx))
1441 		return NULL;
1442 
1443 	/* Mask the interrupt. */
1444 	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
1445 	    PCIE_CLIENT_INTR_MASK_LEGACY, (mask << 16) | mask);
1446 	intr_barrier(sc->sc_ih);
1447 
1448 	di = malloc(sizeof(*di), M_DEVBUF, M_WAITOK | M_ZERO);
1449 	di->di_func = func;
1450 	di->di_arg = arg;
1451 	di->di_ipl = level & IPL_IRQMASK;
1452 	di->di_flags = level & IPL_FLAGMASK;
1453 	di->di_pin = pin;
1454 	di->di_name = name;
1455 	if (name != NULL)
1456 		evcount_attach(&di->di_count, name, &di->di_pin);
1457 	di->di_sc = sc;
1458 	TAILQ_INSERT_TAIL(&sc->sc_intx[pin], di, di_next);
1459 
1460 	/* Unmask the interrupt. */
1461 	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
1462 	    PCIE_CLIENT_INTR_MASK_LEGACY, mask << 16);
1463 
1464 	return di;
1465 }
1466 
1467 void
1468 dwpcie_rk3568_intr_disestablish(void *cookie)
1469 {
1470 	struct dwpcie_intx *di = cookie;
1471 	struct dwpcie_softc *sc = di->di_sc;
1472 	uint32_t mask = (1U << di->di_pin);
1473 
1474 	/* Mask the interrupt. */
1475 	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
1476 	    PCIE_CLIENT_INTR_MASK_LEGACY, (mask << 16) | mask);
1477 	intr_barrier(sc->sc_ih);
1478 
1479 	if (di->di_name)
1480 		evcount_detach(&di->di_count);
1481 
1482 	TAILQ_REMOVE(&sc->sc_intx[di->di_pin], di, di_next);
1483 
1484 	if (!TAILQ_EMPTY(&sc->sc_intx[di->di_pin])) {
1485 		/* Unmask the interrupt. */
1486 		bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
1487 		    PCIE_CLIENT_INTR_MASK_LEGACY, mask << 16);
1488 	}
1489 
1490 	free(di, M_DEVBUF, sizeof(*di));
1491 }
1492 
1493 void
1494 dwpcie_rk3568_intr_barrier(void *cookie)
1495 {
1496 	struct dwpcie_intx *di = cookie;
1497 	struct dwpcie_softc *sc = di->di_sc;
1498 
1499 	intr_barrier(sc->sc_ih);
1500 }
1501 
1502 int
1503 dwpcie_sc8280xp_init(struct dwpcie_softc *sc)
1504 {
1505 	sc->sc_num_viewport = 8;
1506 
1507 	return 0;
1508 }
1509 
1510 void
1511 dwpcie_atu_write(struct dwpcie_softc *sc, int index, off_t reg,
1512     uint32_t val)
1513 {
1514 	if (sc->sc_atu_unroll) {
1515 		bus_space_write_4(sc->sc_iot, sc->sc_atu_ioh,
1516 		    IATU_OFFSET_UNROLL(index) + reg, val);
1517 		return;
1518 	}
1519 
1520 	if (sc->sc_atu_viewport != index) {
1521 		HWRITE4(sc, IATU_VIEWPORT, index);
1522 		sc->sc_atu_viewport = index;
1523 	}
1524 
1525 	HWRITE4(sc, IATU_OFFSET_VIEWPORT + reg, val);
1526 }
1527 
1528 uint32_t
1529 dwpcie_atu_read(struct dwpcie_softc *sc, int index, off_t reg)
1530 {
1531 	if (sc->sc_atu_unroll) {
1532 		return bus_space_read_4(sc->sc_iot, sc->sc_atu_ioh,
1533 		    IATU_OFFSET_UNROLL(index) + reg);
1534 	}
1535 
1536 	if (sc->sc_atu_viewport != index) {
1537 		HWRITE4(sc, IATU_VIEWPORT, index);
1538 		sc->sc_atu_viewport = index;
1539 	}
1540 
1541 	return HREAD4(sc, IATU_OFFSET_VIEWPORT + reg);
1542 }
1543 
1544 void
1545 dwpcie_atu_disable(struct dwpcie_softc *sc, int index)
1546 {
1547 	dwpcie_atu_write(sc, index, IATU_REGION_CTRL_2, 0);
1548 }
1549 
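/*
 * Program an outbound iATU region translating CPU addresses in
 * [cpu_addr, cpu_addr + size) to PCI addresses starting at pci_addr,
 * then poll until the enable bit reads back as set.
 */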
1550 void
1551 dwpcie_atu_config(struct dwpcie_softc *sc, int index, int type,
1552     uint64_t cpu_addr, uint64_t pci_addr, uint64_t size)
1553 {
1554 	uint32_t reg;
1555 	int timo;
1556 
1557 	dwpcie_atu_write(sc, index, IATU_LWR_BASE_ADDR, cpu_addr);
1558 	dwpcie_atu_write(sc, index, IATU_UPPER_BASE_ADDR, cpu_addr >> 32);
1559 	dwpcie_atu_write(sc, index, IATU_LIMIT_ADDR, cpu_addr + size - 1);
1560 	dwpcie_atu_write(sc, index, IATU_LWR_TARGET_ADDR, pci_addr);
1561 	dwpcie_atu_write(sc, index, IATU_UPPER_TARGET_ADDR, pci_addr >> 32);
1562 	dwpcie_atu_write(sc, index, IATU_REGION_CTRL_1, type);
1563 	dwpcie_atu_write(sc, index, IATU_REGION_CTRL_2,
1564 	    IATU_REGION_CTRL_2_REGION_EN);
1565 
1566 	for (timo = 5; timo > 0; timo--) {
1567 		reg = dwpcie_atu_read(sc, index, IATU_REGION_CTRL_2);
1568 		if (reg & IATU_REGION_CTRL_2_REGION_EN)
1569 			break;
1570 		delay(9000);
1571 	}
1572 	if (timo == 0)
1573 		printf("%s:%d: timeout\n", __func__, __LINE__);
1574 }
1575 
1576 int
1577 dwpcie_link_up(struct dwpcie_softc *sc)
1578 {
1579 	uint32_t reg;
1580 
1581 	reg = HREAD4(sc, PCIE_PHY_DEBUG_R1);
1582 	if ((reg & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP) != 0 &&
1583 	    (reg & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING) == 0)
1584 		return 1;
1585 	return 0;
1586 }
1587 
1588 void
1589 dwpcie_attach_hook(struct device *parent, struct device *self,
1590     struct pcibus_attach_args *pba)
1591 {
1592 }
1593 
1594 int
1595 dwpcie_bus_maxdevs(void *v, int bus)
1596 {
1597 	struct dwpcie_softc *sc = v;
1598 
1599 	if (bus == sc->sc_bus || bus == sc->sc_bus + 1)
1600 		return 1;
1601 	return 32;
1602 }
1603 
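/*
 * Find the device tree node matching a bus/device/function so that its
 * node handle can be stashed in the upper 32 bits of the pcitag.
 */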
1604 int
1605 dwpcie_find_node(int node, int bus, int device, int function)
1606 {
1607 	uint32_t reg[5];
1608 	uint32_t phys_hi;
1609 	int child;
1610 
1611 	phys_hi = ((bus << 16) | (device << 11) | (function << 8));
1612 
1613 	for (child = OF_child(node); child; child = OF_peer(child)) {
1614 		if (OF_getpropintarray(child, "reg",
1615 		    reg, sizeof(reg)) != sizeof(reg))
1616 			continue;
1617 
1618 		if (reg[0] == phys_hi)
1619 			return child;
1620 
1621 		node = dwpcie_find_node(child, bus, device, function);
1622 		if (node)
1623 			return node;
1624 	}
1625 
1626 	return 0;
1627 }
1628 
1629 pcitag_t
1630 dwpcie_make_tag(void *v, int bus, int device, int function)
1631 {
1632 	struct dwpcie_softc *sc = v;
1633 	int node;
1634 
1635 	node = dwpcie_find_node(sc->sc_node, bus, device, function);
1636 	return (((pcitag_t)node << 32) |
1637 	    (bus << 24) | (device << 19) | (function << 16));
1638 }
1639 
1640 void
1641 dwpcie_decompose_tag(void *v, pcitag_t tag, int *bp, int *dp, int *fp)
1642 {
1643 	if (bp != NULL)
1644 		*bp = (tag >> 24) & 0xff;
1645 	if (dp != NULL)
1646 		*dp = (tag >> 19) & 0x1f;
1647 	if (fp != NULL)
1648 		*fp = (tag >> 16) & 0x7;
1649 }
1650 
1651 int
1652 dwpcie_conf_size(void *v, pcitag_t tag)
1653 {
1654 	return PCIE_CONFIG_SPACE_SIZE;
1655 }
1656 
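/*
 * Root bus registers live in the DBI space.  For everything below it, a
 * CFG0 (directly attached device) or CFG1 (deeper buses) iATU region is
 * pointed at the target before going through the "config" window; with
 * only two viewports that region is shared with the I/O window, which
 * is restored afterwards.
 */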
1657 pcireg_t
1658 dwpcie_conf_read(void *v, pcitag_t tag, int reg)
1659 {
1660 	struct dwpcie_softc *sc = v;
1661 	int bus, dev, fn;
1662 	uint32_t ret;
1663 
1664 	dwpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
1665 	if (bus == sc->sc_bus) {
1666 		KASSERT(dev == 0);
1667 		tag = dwpcie_make_tag(sc, 0, dev, fn);
1668 		return HREAD4(sc, PCITAG_OFFSET(tag) | reg);
1669 	}
1670 
1671 	if (bus == sc->sc_bus + 1) {
1672 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
1673 		    IATU_REGION_CTRL_1_TYPE_CFG0,
1674 		    sc->sc_conf_base, PCITAG_OFFSET(tag),
1675 		    sc->sc_conf_size);
1676 	} else {
1677 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
1678 		    IATU_REGION_CTRL_1_TYPE_CFG1,
1679 		    sc->sc_conf_base, PCITAG_OFFSET(tag),
1680 		    sc->sc_conf_size);
1681 	}
1682 
1683 	ret = bus_space_read_4(sc->sc_iot, sc->sc_conf_ioh, reg);
1684 
1685 	if (sc->sc_num_viewport <= 2 && sc->sc_io_size > 0) {
1686 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
1687 		    IATU_REGION_CTRL_1_TYPE_IO, sc->sc_io_base,
1688 		    sc->sc_io_bus_addr, sc->sc_io_size);
1689 	}
1690 
1691 	return ret;
1692 }
1693 
1694 void
1695 dwpcie_conf_write(void *v, pcitag_t tag, int reg, pcireg_t data)
1696 {
1697 	struct dwpcie_softc *sc = v;
1698 	int bus, dev, fn;
1699 
1700 	dwpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
1701 	if (bus == sc->sc_bus) {
1702 		KASSERT(dev == 0);
1703 		tag = dwpcie_make_tag(sc, 0, dev, fn);
1704 		HWRITE4(sc, PCITAG_OFFSET(tag) | reg, data);
1705 		return;
1706 	}
1707 
1708 	if (bus == sc->sc_bus + 1) {
1709 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
1710 		    IATU_REGION_CTRL_1_TYPE_CFG0,
1711 		    sc->sc_conf_base, PCITAG_OFFSET(tag),
1712 		    sc->sc_conf_size);
1713 	} else {
1714 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
1715 		    IATU_REGION_CTRL_1_TYPE_CFG1,
1716 		    sc->sc_conf_base, PCITAG_OFFSET(tag),
1717 		    sc->sc_conf_size);
1718 	}
1719 
1720 	bus_space_write_4(sc->sc_iot, sc->sc_conf_ioh, reg, data);
1721 
1722 	if (sc->sc_num_viewport <= 2 && sc->sc_io_size > 0) {
1723 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
1724 		    IATU_REGION_CTRL_1_TYPE_IO, sc->sc_io_base,
1725 		    sc->sc_io_bus_addr, sc->sc_io_size);
1726 	}
1727 }
1728 
1729 int
1730 dwpcie_probe_device_hook(void *v, struct pci_attach_args *pa)
1731 {
1732 	struct dwpcie_softc *sc = v;
1733 	uint16_t rid;
1734 	int i;
1735 
1736 	rid = pci_requester_id(pa->pa_pc, pa->pa_tag);
1737 	pa->pa_dmat = iommu_device_map_pci(sc->sc_node, rid, pa->pa_dmat);
1738 
1739 	for (i = 0; i < sc->sc_nranges; i++) {
1740 		iommu_reserve_region_pci(sc->sc_node, rid,
1741 		    sc->sc_ranges[i].pci_base, sc->sc_ranges[i].size);
1742 	}
1743 
1744 	return 0;
1745 }
1746 
1747 int
1748 dwpcie_intr_map(struct pci_attach_args *pa, pci_intr_handle_t *ihp)
1749 {
1750 	int pin = pa->pa_rawintrpin;
1751 
1752 	if (pin == 0 || pin > PCI_INTERRUPT_PIN_MAX)
1753 		return -1;
1754 
1755 	if (pa->pa_tag == 0)
1756 		return -1;
1757 
1758 	ihp->ih_pc = pa->pa_pc;
1759 	ihp->ih_tag = pa->pa_intrtag;
1760 	ihp->ih_intrpin = pa->pa_intrpin;
1761 	ihp->ih_type = PCI_INTX;
1762 
1763 	return 0;
1764 }
1765 
1766 const char *
1767 dwpcie_intr_string(void *v, pci_intr_handle_t ih)
1768 {
1769 	switch (ih.ih_type) {
1770 	case PCI_MSI:
1771 		return "msi";
1772 	case PCI_MSIX:
1773 		return "msix";
1774 	}
1775 
1776 	return "intx";
1777 }
1778 
1779 struct dwpcie_msi *
1780 dwpcie_msi_establish(struct dwpcie_softc *sc, int level,
1781     int (*func)(void *), void *arg, char *name)
1782 {
1783 	struct dwpcie_msi *dm;
1784 	int vec;
1785 
1786 	for (vec = 0; vec < DWPCIE_NUM_MSI; vec++) {
1787 		dm = &sc->sc_msi[vec];
1788 		if (dm->dm_func == NULL)
1789 			break;
1790 	}
1791 	if (vec == DWPCIE_NUM_MSI)
1792 		return NULL;
1793 
1794 	dm->dm_func = func;
1795 	dm->dm_arg = arg;
1796 	dm->dm_ipl = level & IPL_IRQMASK;
1797 	dm->dm_flags = level & IPL_FLAGMASK;
1798 	dm->dm_vec = vec;
1799 	dm->dm_name = name;
1800 	if (name != NULL)
1801 		evcount_attach(&dm->dm_count, name, &dm->dm_vec);
1802 
1803 	/* Unmask the MSI. */
1804 	HCLR4(sc, PCIE_MSI_INTR0_MASK, (1U << vec));
1805 
1806 	return dm;
1807 }
1808 
1809 void
1810 dwpcie_msi_disestablish(struct dwpcie_softc *sc, struct dwpcie_msi *dm)
1811 {
1812 	/* Mask the MSI. */
1813 	HSET4(sc, PCIE_MSI_INTR0_MASK, (1U << dm->dm_vec));
1814 
1815 	if (dm->dm_name)
1816 		evcount_detach(&dm->dm_count);
1817 	dm->dm_func = NULL;
1818 }
1819 
1820 void *
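/*
 * MSIs either terminate in the controller itself (sc_msi_addr is set)
 * or are routed to an external MSI controller via the device tree; in
 * the latter case a dummy dmamap translates the doorbell address into
 * a bus address the device can reach.
 */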
1821 dwpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
1822     struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
1823 {
1824 	struct dwpcie_softc *sc = v;
1825 	struct dwpcie_intr_handle *pih;
1826 	void *cookie = NULL;
1827 
1828 	KASSERT(ih.ih_type != PCI_NONE);
1829 
1830 	if (ih.ih_type != PCI_INTX) {
1831 		struct dwpcie_msi *dm = NULL;
1832 		bus_dma_tag_t dmat = ih.ih_dmat;
1833 		bus_dma_segment_t seg;
1834 		bus_dmamap_t map;
1835 		uint64_t addr, data;
1836 
1837 		if (sc->sc_msi_addr) {
1838 			dm = dwpcie_msi_establish(sc, level, func, arg, name);
1839 			if (dm == NULL)
1840 				return NULL;
1841 			addr = sc->sc_msi_addr;
1842 			data = dm->dm_vec;
1843 		} else {
1844 			/*
1845 			 * Assume hardware passes Requester ID as
1846 			 * sideband data.
1847 			 */
1848 			data = pci_requester_id(ih.ih_pc, ih.ih_tag);
1849 			cookie = fdt_intr_establish_msi_cpu(sc->sc_node, &addr,
1850 			    &data, level, ci, func, arg, (void *)name);
1851 			if (cookie == NULL)
1852 				return NULL;
1853 		}
1854 
1855 		pih = malloc(sizeof(*pih), M_DEVBUF, M_WAITOK | M_ZERO);
1856 		pih->pih_ih.ih_ic = &dwpcie_ic;
1857 		pih->pih_ih.ih_ih = cookie;
1858 		pih->pih_sc = sc;
1859 		pih->pih_dm = dm;
1860 
1861 		if (sc->sc_msi_addr == 0) {
1862 			if (bus_dmamap_create(dmat, sizeof(uint32_t), 1,
1863 			    sizeof(uint32_t), 0, BUS_DMA_WAITOK, &map)) {
1864 				free(pih, M_DEVBUF, sizeof(*pih));
1865 				fdt_intr_disestablish(cookie);
1866 				return NULL;
1867 			}
1868 
1869 			memset(&seg, 0, sizeof(seg));
1870 			seg.ds_addr = addr;
1871 			seg.ds_len = sizeof(uint32_t);
1872 
1873 			if (bus_dmamap_load_raw(dmat, map, &seg, 1,
1874 			    sizeof(uint32_t), BUS_DMA_WAITOK)) {
1875 				bus_dmamap_destroy(dmat, map);
1876 				free(pih, M_DEVBUF, sizeof(*pih));
1877 				fdt_intr_disestablish(cookie);
1878 				return NULL;
1879 			}
1880 
1881 			addr = map->dm_segs[0].ds_addr;
1882 			pih->pih_dmat = dmat;
1883 			pih->pih_map = map;
1884 		}
1885 
1886 		if (ih.ih_type == PCI_MSIX) {
1887 			pci_msix_enable(ih.ih_pc, ih.ih_tag,
1888 			    &sc->sc_bus_memt, ih.ih_intrpin, addr, data);
1889 		} else
1890 			pci_msi_enable(ih.ih_pc, ih.ih_tag, addr, data);
1891 	} else {
1892 		int bus, dev, fn;
1893 		uint32_t reg[4];
1894 
1895 		dwpcie_decompose_tag(sc, ih.ih_tag, &bus, &dev, &fn);
1896 
1897 		reg[0] = bus << 16 | dev << 11 | fn << 8;
1898 		reg[1] = reg[2] = 0;
1899 		reg[3] = ih.ih_intrpin;
1900 
1901 		cookie = fdt_intr_establish_imap_cpu(sc->sc_node, reg,
1902 		    sizeof(reg), level, ci, func, arg, name);
1903 		if (cookie == NULL)
1904 			return NULL;
1905 
1906 		pih = malloc(sizeof(*pih), M_DEVBUF, M_WAITOK | M_ZERO);
1907 		pih->pih_ih.ih_ic = &dwpcie_ic;
1908 		pih->pih_ih.ih_ih = cookie;
1909 	}
1910 
1911 	return pih;
1912 }
1913 
1914 void
1915 dwpcie_intr_disestablish(void *v, void *cookie)
1916 {
1917 	struct dwpcie_intr_handle *pih = cookie;
1918 
1919 	if (pih->pih_dm)
1920 		dwpcie_msi_disestablish(pih->pih_sc, pih->pih_dm);
1921 	else
1922 		fdt_intr_disestablish(pih->pih_ih.ih_ih);
1923 
1924 	if (pih->pih_dmat) {
1925 		bus_dmamap_unload(pih->pih_dmat, pih->pih_map);
1926 		bus_dmamap_destroy(pih->pih_dmat, pih->pih_map);
1927 	}
1928 
1929 	free(pih, M_DEVBUF, sizeof(*pih));
1930 }
1931 
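/*
 * bus_space map hooks for the per-bus I/O and memory tags: translate a
 * PCI bus address back to the corresponding CPU physical address using
 * the "ranges" windows, then map through the parent tag.
 */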
1932 int
1933 dwpcie_bs_iomap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
1934     int flags, bus_space_handle_t *bshp)
1935 {
1936 	struct dwpcie_softc *sc = t->bus_private;
1937 	int i;
1938 
1939 	for (i = 0; i < sc->sc_nranges; i++) {
1940 		uint64_t pci_start = sc->sc_ranges[i].pci_base;
1941 		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
1942 		uint64_t phys_start = sc->sc_ranges[i].phys_base;
1943 
1944 		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
1945 		    addr >= pci_start && addr + size <= pci_end) {
1946 			return bus_space_map(sc->sc_iot,
1947 			    addr - pci_start + phys_start, size, flags, bshp);
1948 		}
1949 	}
1950 
1951 	return ENXIO;
1952 }
1953 
1954 int
1955 dwpcie_bs_memmap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
1956     int flags, bus_space_handle_t *bshp)
1957 {
1958 	struct dwpcie_softc *sc = t->bus_private;
1959 	int i;
1960 
1961 	for (i = 0; i < sc->sc_nranges; i++) {
1962 		uint64_t pci_start = sc->sc_ranges[i].pci_base;
1963 		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
1964 		uint64_t phys_start = sc->sc_ranges[i].phys_base;
1965 
1966 		if ((sc->sc_ranges[i].flags & 0x02000000) == 0x02000000 &&
1967 		    addr >= pci_start && addr + size <= pci_end) {
1968 			return bus_space_map(sc->sc_iot,
1969 			    addr - pci_start + phys_start, size, flags, bshp);
1970 		}
1971 	}
1972 
1973 	return ENXIO;
1974 }
1975