xref: /openbsd-src/sys/dev/fdt/mvkpcie.c (revision f1dd7b858388b4a23f4f67a4957ec5ff656ebbe8)
1 /*	$OpenBSD: mvkpcie.c,v 1.9 2021/02/25 23:07:49 patrick Exp $	*/
2 /*
3  * Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
4  * Copyright (c) 2020 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/device.h>
22 #include <sys/extent.h>
23 #include <sys/malloc.h>
24 #include <sys/evcount.h>
25 
26 #include <machine/intr.h>
27 #include <machine/bus.h>
28 #include <machine/fdt.h>
29 
30 #include <dev/pci/pcidevs.h>
31 #include <dev/pci/pcireg.h>
32 #include <dev/pci/pcivar.h>
33 #include <dev/pci/ppbreg.h>
34 
35 #include <dev/ofw/openfirm.h>
36 #include <dev/ofw/ofw_clock.h>
37 #include <dev/ofw/ofw_gpio.h>
38 #include <dev/ofw/ofw_misc.h>
39 #include <dev/ofw/ofw_pinctrl.h>
40 #include <dev/ofw/ofw_power.h>
41 #include <dev/ofw/fdt.h>
42 
43 /* Registers */
44 #define PCIE_DEV_ID			0x0000
45 #define PCIE_CMD			0x0004
46 #define PCIE_DEV_REV			0x0008
47 #define PCIE_DEV_CTRL_STATS		0x00c8
48 #define  PCIE_DEV_CTRL_STATS_SNOOP		(1 << 1)
49 #define  PCIE_DEV_CTRL_STATS_RELAX_ORDER	(1 << 4)
50 #define  PCIE_DEV_CTRL_STATS_MAX_PAYLOAD_7	(0x7 << 5)
51 #define  PCIE_DEV_CTRL_STATS_MAX_RD_REQ_SZ	(0x2 << 12)
52 #define PCIE_LINK_CTRL_STAT		0x00d0
53 #define  PCIE_LINK_CTRL_STAT_LINK_L0S_ENTRY	(1 << 0)
54 #define  PCIE_LINK_CTRL_STAT_LINK_TRAINING	(1 << 5)
55 #define  PCIE_LINK_CTRL_STAT_LINK_WIDTH_1	(1 << 20)
56 #define PCIE_ERR_CAPCTL			0x0118
57 #define  PCIE_ERR_CAPCTL_ECRC_CHK_TX		(1 << 5)
58 #define  PCIE_ERR_CAPCTL_ECRC_CHK_TX_EN		(1 << 6)
59 #define  PCIE_ERR_CAPCTL_ECRC_CHCK		(1 << 7)
60 #define  PCIE_ERR_CAPCTL_ECRC_CHCK_RCV		(1 << 8)
61 #define PIO_CTRL			0x4000
62 #define  PIO_CTRL_TYPE_MASK			(0xf << 0)
63 #define  PIO_CTRL_TYPE_RD0			(0x8 << 0)
64 #define  PIO_CTRL_TYPE_RD1			(0x9 << 0)
65 #define  PIO_CTRL_TYPE_WR0			(0xa << 0)
66 #define  PIO_CTRL_TYPE_WR1			(0xb << 0)
67 #define  PIO_CTRL_ADDR_WIN_DISABLE		(1 << 24)
68 #define PIO_STAT			0x4004
69 #define  PIO_STAT_COMP_STATUS			(0x7 << 7)
70 #define PIO_ADDR_LS			0x4008
71 #define PIO_ADDR_MS			0x400c
72 #define PIO_WR_DATA			0x4010
73 #define PIO_WR_DATA_STRB		0x4014
74 #define  PIO_WR_DATA_STRB_VALUE			0xf
75 #define PIO_RD_DATA			0x4018
76 #define PIO_START			0x401c
77 #define  PIO_START_STOP				(0 << 0)
78 #define  PIO_START_START			(1 << 0)
79 #define PIO_ISR				0x4020
80 #define  PIO_ISR_CLEAR				(1 << 0)
81 #define PIO_ISRM			0x4024
82 #define PCIE_CORE_CTRL0			0x4800
83 #define  PCIE_CORE_CTRL0_GEN_1			(0 << 0)
84 #define  PCIE_CORE_CTRL0_GEN_2			(1 << 0)
85 #define  PCIE_CORE_CTRL0_GEN_3			(2 << 0)
86 #define  PCIE_CORE_CTRL0_GEN_MASK		(0x3 << 0)
87 #define  PCIE_CORE_CTRL0_IS_RC			(1 << 2)
88 #define  PCIE_CORE_CTRL0_LANE_1			(0 << 3)
89 #define  PCIE_CORE_CTRL0_LANE_2			(1 << 3)
90 #define  PCIE_CORE_CTRL0_LANE_4			(2 << 3)
91 #define  PCIE_CORE_CTRL0_LANE_8			(3 << 3)
92 #define  PCIE_CORE_CTRL0_LANE_MASK		(0x3 << 3)
93 #define  PCIE_CORE_CTRL0_LINK_TRAINING		(1 << 6)
94 #define PCIE_CORE_CTRL2			0x4808
95 #define  PCIE_CORE_CTRL2_RESERVED		(0x7 << 0)
96 #define  PCIE_CORE_CTRL2_TD_ENABLE		(1 << 4)
97 #define  PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE	(1 << 5)
98 #define  PCIE_CORE_CTRL2_OB_WIN_ENABLE		(1 << 6)
99 #define  PCIE_CORE_CTRL2_MSI_ENABLE		(1 << 10)
100 #define PCIE_CORE_ISR0_STATUS		0x4840
101 #define PCIE_CORE_ISR0_MASK		0x4844
102 #define  PCIE_CORE_ISR0_MASK_MSI_INT		(1 << 24)
103 #define  PCIE_CORE_ISR0_MASK_ALL		0x07ffffff
104 #define PCIE_CORE_ISR1_STATUS		0x4848
105 #define PCIE_CORE_ISR1_MASK		0x484c
106 #define  PCIE_CORE_ISR1_MASK_ALL		0x00000ff0
107 #define  PCIE_CORE_ISR1_MASK_INTX(x)		(1 << (x + 8))
108 #define PCIE_CORE_MSI_ADDR_LOW		0x4850
109 #define PCIE_CORE_MSI_ADDR_HIGH		0x4854
110 #define PCIE_CORE_MSI_STATUS		0x4858
111 #define PCIE_CORE_MSI_MASK		0x485c
112 #define PCIE_CORE_MSI_PAYLOAD		0x489c
113 #define LMI_CFG				0x6000
114 #define  LMI_CFG_LTSSM_VAL(x)			(((x) >> 24) & 0x3f)
115 #define  LMI_CFG_LTSSM_L0			0x10
116 #define CTRL_CORE_CONFIG		0x18000
117 #define  CTRL_CORE_CONFIG_MODE_DIRECT		(0 << 0)
118 #define  CTRL_CORE_CONFIG_MODE_COMMAND		(1 << 0)
119 #define  CTRL_CORE_CONFIG_MODE_MASK		(1 << 0)
120 #define HOST_CTRL_INT_STATUS		0x1b000
121 #define HOST_CTRL_INT_MASK		0x1b004
122 #define  HOST_CTRL_INT_MASK_CORE_INT		(1 << 16)
123 #define  HOST_CTRL_INT_MASK_ALL			0xfff0fb
124 
/*
 * Register access helpers for the controller's MMIO space.
 * HSET4/HCLR4 are non-atomic read-modify-write cycles.
 */
#define HREAD4(sc, reg)							\
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define HSET4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
#define HCLR4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))
133 
/*
 * A single-segment DMA buffer: the loaded map, its backing segment,
 * the allocation size and the kernel virtual address of the mapping.
 */
struct mvkpcie_dmamem {
	bus_dmamap_t		mdm_map;	/* loaded DMA map */
	bus_dma_segment_t	mdm_seg;	/* backing physical segment */
	size_t			mdm_size;	/* size of the allocation */
	caddr_t			mdm_kva;	/* kernel virtual address */
};
140 
141 #define MVKPCIE_DMA_MAP(_mdm)	((_mdm)->mdm_map)
142 #define MVKPCIE_DMA_LEN(_mdm)	((_mdm)->mdm_size)
143 #define MVKPCIE_DMA_DVA(_mdm)	((uint64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
144 #define MVKPCIE_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
145 
/*
 * Per-established-interrupt bookkeeping, shared by the INTx and MSI
 * paths.
 */
struct intrhand {
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_irq;			/* IRQ number */
	struct evcount	ih_count;	/* interrupt statistics */
	char *ih_name;			/* name for evcount(9) */
	void *ih_sc;			/* back-pointer to our softc */
};
155 
/*
 * One decoded entry of the FDT "ranges" property: a PCI address
 * window and the CPU physical address it maps to.
 */
struct mvkpcie_range {
	uint32_t		flags;		/* PCI address-space flags */
	uint64_t		pci_base;	/* address on the PCI bus */
	uint64_t		phys_base;	/* CPU physical address */
	uint64_t		size;		/* window size in bytes */
};
162 
/*
 * Per-controller state.
 */
struct mvkpcie_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;		/* controller register tag */
	bus_space_handle_t	sc_ioh;		/* controller register handle */
	bus_dma_tag_t		sc_dmat;

	bus_addr_t		sc_io_base;	/* CPU address of I/O window */
	bus_addr_t		sc_io_bus_addr;	/* PCI address of I/O window */
	bus_size_t		sc_io_size;
	bus_addr_t		sc_mem_base;	/* CPU address of mem window */
	bus_addr_t		sc_mem_bus_addr; /* PCI address of mem window */
	bus_size_t		sc_mem_size;

	int			sc_node;	/* our FDT node */
	int			sc_acells;	/* #address-cells */
	int			sc_scells;	/* #size-cells */
	int			sc_pacells;	/* parent #address-cells */
	int			sc_pscells;	/* parent #size-cells */
	struct mvkpcie_range	*sc_ranges;	/* parsed "ranges" entries */
	int			sc_nranges;

	struct bus_space	sc_bus_iot;	/* I/O space tag for children */
	struct bus_space	sc_bus_memt;	/* mem space tag for children */

	struct arm64_pci_chipset sc_pc;
	int			sc_bus;		/* root bus number */

	/* Shadow copies of the emulated PCI-PCI bridge registers. */
	uint32_t		sc_bridge_command;
	uint32_t		sc_bridge_businfo;
	uint32_t		sc_bridge_iostatus;
	uint32_t		sc_bridge_io_hi;
	uint32_t		sc_bridge_mem;

	struct interrupt_controller sc_ic;	/* legacy INTx controller */
	struct intrhand		*sc_intx_handlers[4];
	struct interrupt_controller sc_msi_ic;	/* MSI controller */
	struct intrhand		*sc_msi_handlers[32];
	struct mvkpcie_dmamem	*sc_msi_addr;	/* MSI doorbell buffer */
	void			*sc_ih;		/* parent interrupt handle */
	int			sc_ipl;		/* IPL of parent interrupt */
};
204 
int mvkpcie_match(struct device *, void *, void *);
void mvkpcie_attach(struct device *, struct device *, void *);

/* Autoconf glue. */
struct cfattach mvkpcie_ca = {
	sizeof (struct mvkpcie_softc), mvkpcie_match, mvkpcie_attach
};

struct cfdriver mvkpcie_cd = {
	NULL, "mvkpcie", DV_DULL
};
215 
216 int
217 mvkpcie_match(struct device *parent, void *match, void *aux)
218 {
219 	struct fdt_attach_args *faa = aux;
220 
221 	return OF_is_compatible(faa->fa_node, "marvell,armada-3700-pcie");
222 }
223 
224 int	mvkpcie_link_up(struct mvkpcie_softc *);
225 
226 void	mvkpcie_attach_hook(struct device *, struct device *,
227 	    struct pcibus_attach_args *);
228 int	mvkpcie_bus_maxdevs(void *, int);
229 pcitag_t mvkpcie_make_tag(void *, int, int, int);
230 void	mvkpcie_decompose_tag(void *, pcitag_t, int *, int *, int *);
231 int	mvkpcie_conf_size(void *, pcitag_t);
232 pcireg_t mvkpcie_conf_read(void *, pcitag_t, int);
233 void	mvkpcie_conf_write(void *, pcitag_t, int, pcireg_t);
234 int	mvkpcie_probe_device_hook(void *, struct pci_attach_args *);
235 
236 int	mvkpcie_intr_map(struct pci_attach_args *, pci_intr_handle_t *);
237 const char *mvkpcie_intr_string(void *, pci_intr_handle_t);
238 void	*mvkpcie_intr_establish(void *, pci_intr_handle_t, int,
239 	    struct cpu_info *, int (*)(void *), void *, char *);
240 void	mvkpcie_intr_disestablish(void *, void *);
241 
242 int	mvkpcie_bs_iomap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
243 	    bus_space_handle_t *);
244 int	mvkpcie_bs_memmap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
245 	    bus_space_handle_t *);
246 
247 int	mvkpcie_intc_intr(void *);
248 void	*mvkpcie_intc_intr_establish(void *, int *, int, struct cpu_info *,
249 	    int (*)(void *), void *, char *);
250 void	mvkpcie_intc_intr_disestablish(void *);
251 void	*mvkpcie_intc_intr_establish_msi(void *, uint64_t *, uint64_t *,
252 	    int , struct cpu_info *, int (*)(void *), void *, char *);
253 void	mvkpcie_intc_intr_disestablish_msi(void *);
254 void	mvkpcie_intc_intr_barrier(void *);
255 void	mvkpcie_intc_recalc_ipl(struct mvkpcie_softc *);
256 
257 struct mvkpcie_dmamem *mvkpcie_dmamem_alloc(struct mvkpcie_softc *, bus_size_t,
258 	    bus_size_t);
259 void	mvkpcie_dmamem_free(struct mvkpcie_softc *, struct mvkpcie_dmamem *);
260 
/*
 * Attach the controller: parse FDT properties, reset and configure
 * the PCIe core, train the link, build the emulated bridge state and
 * the child bus space tags, register the INTx and MSI interrupt
 * controllers and finally attach the PCI bus.
 */
void
mvkpcie_attach(struct device *parent, struct device *self, void *aux)
{
	struct mvkpcie_softc *sc = (struct mvkpcie_softc *)self;
	struct fdt_attach_args *faa = aux;
	struct pcibus_attach_args pba;
	uint32_t *reset_gpio;
	ssize_t reset_gpiolen;
	bus_addr_t iobase, iolimit;
	bus_addr_t membase, memlimit;
	uint32_t bus_range[2];
	uint32_t *ranges;
	int i, j, nranges, rangeslen;
	pcireg_t csr, bir, blr;
	uint32_t reg;
	int node;
	int timo;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;
	sc->sc_node = faa->fa_node;

	/* Cell counts needed to decode "ranges" below. */
	sc->sc_acells = OF_getpropint(sc->sc_node, "#address-cells",
	    faa->fa_acells);
	sc->sc_scells = OF_getpropint(sc->sc_node, "#size-cells",
	    faa->fa_scells);
	sc->sc_pacells = faa->fa_acells;
	sc->sc_pscells = faa->fa_scells;

	/* "ranges" must be a whole number of (child, parent, size) tuples. */
	rangeslen = OF_getproplen(sc->sc_node, "ranges");
	if (rangeslen <= 0 || (rangeslen % sizeof(uint32_t)) ||
	     (rangeslen / sizeof(uint32_t)) % (sc->sc_acells +
	     sc->sc_pacells + sc->sc_scells)) {
		printf(": invalid ranges property\n");
		return;
	}

	/* DMA-able doorbell the hardware targets for inbound MSI writes. */
	sc->sc_msi_addr = mvkpcie_dmamem_alloc(sc, sizeof(uint16_t),
	    sizeof(uint64_t));
	if (sc->sc_msi_addr == NULL) {
		printf(": cannot allocate MSI address\n");
		return;
	}

	ranges = malloc(rangeslen, M_TEMP, M_WAITOK);
	OF_getpropintarray(sc->sc_node, "ranges", ranges,
	    rangeslen);

	nranges = (rangeslen / sizeof(uint32_t)) /
	    (sc->sc_acells + sc->sc_pacells + sc->sc_scells);
	sc->sc_ranges = mallocarray(nranges,
	    sizeof(struct mvkpcie_range), M_TEMP, M_WAITOK);
	sc->sc_nranges = nranges;

	/* Decode each range; the first child cell carries the PCI flags. */
	for (i = 0, j = 0; i < sc->sc_nranges; i++) {
		sc->sc_ranges[i].flags = ranges[j++];
		sc->sc_ranges[i].pci_base = ranges[j++];
		if (sc->sc_acells - 1 == 2) {
			sc->sc_ranges[i].pci_base <<= 32;
			sc->sc_ranges[i].pci_base |= ranges[j++];
		}
		sc->sc_ranges[i].phys_base = ranges[j++];
		if (sc->sc_pacells == 2) {
			sc->sc_ranges[i].phys_base <<= 32;
			sc->sc_ranges[i].phys_base |= ranges[j++];
		}
		sc->sc_ranges[i].size = ranges[j++];
		if (sc->sc_scells == 2) {
			sc->sc_ranges[i].size <<= 32;
			sc->sc_ranges[i].size |= ranges[j++];
		}
	}

	free(ranges, M_TEMP, rangeslen);

	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
		    sizeof(struct mvkpcie_range));
		printf(": can't map ctrl registers\n");
		return;
	}

	printf("\n");

	pinctrl_byname(sc->sc_node, "default");

	clock_set_assigned(sc->sc_node);
	clock_enable_all(sc->sc_node);

	reset_gpiolen = OF_getproplen(sc->sc_node, "reset-gpios");
	if (reset_gpiolen > 0) {
		/* Link training needs to be disabled during PCIe reset. */
		HCLR4(sc, PCIE_CORE_CTRL0, PCIE_CORE_CTRL0_LINK_TRAINING);

		reset_gpio = malloc(reset_gpiolen, M_TEMP, M_WAITOK);
		OF_getpropintarray(sc->sc_node, "reset-gpios", reset_gpio,
		    reset_gpiolen);

		/* Issue PCIe reset. */
		gpio_controller_config_pin(reset_gpio, GPIO_CONFIG_OUTPUT);
		gpio_controller_set_pin(reset_gpio, 1);
		delay(10000);
		gpio_controller_set_pin(reset_gpio, 0);

		free(reset_gpio, M_TEMP, reset_gpiolen);
	}

	/* Put the controller into direct (root complex) mode. */
	reg = HREAD4(sc, CTRL_CORE_CONFIG);
	reg &= ~CTRL_CORE_CONFIG_MODE_MASK;
	reg |= CTRL_CORE_CONFIG_MODE_DIRECT;
	HWRITE4(sc, CTRL_CORE_CONFIG, reg);

	HSET4(sc, PCIE_CORE_CTRL0, PCIE_CORE_CTRL0_IS_RC);

	/* Enable ECRC generation and checking. */
	HWRITE4(sc, PCIE_ERR_CAPCTL,
	    PCIE_ERR_CAPCTL_ECRC_CHK_TX |
	    PCIE_ERR_CAPCTL_ECRC_CHK_TX_EN |
	    PCIE_ERR_CAPCTL_ECRC_CHCK |
	    PCIE_ERR_CAPCTL_ECRC_CHCK_RCV);

	HWRITE4(sc, PCIE_DEV_CTRL_STATS,
	    PCIE_DEV_CTRL_STATS_MAX_PAYLOAD_7 |
	    PCIE_DEV_CTRL_STATS_MAX_RD_REQ_SZ);

	HWRITE4(sc, PCIE_CORE_CTRL2,
	    PCIE_CORE_CTRL2_RESERVED |
	    PCIE_CORE_CTRL2_TD_ENABLE);

	/* Gen 2 speed, single lane. */
	reg = HREAD4(sc, PCIE_CORE_CTRL0);
	reg &= ~PCIE_CORE_CTRL0_GEN_MASK;
	reg |= PCIE_CORE_CTRL0_GEN_2;
	HWRITE4(sc, PCIE_CORE_CTRL0, reg);

	reg = HREAD4(sc, PCIE_CORE_CTRL0);
	reg &= ~PCIE_CORE_CTRL0_LANE_MASK;
	reg |= PCIE_CORE_CTRL0_LANE_1;
	HWRITE4(sc, PCIE_CORE_CTRL0, reg);

	HSET4(sc, PCIE_CORE_CTRL2, PCIE_CORE_CTRL2_MSI_ENABLE);

	/* Acknowledge any stale interrupt status. */
	HWRITE4(sc, PCIE_CORE_ISR0_STATUS, PCIE_CORE_ISR0_MASK_ALL);
	HWRITE4(sc, PCIE_CORE_ISR1_STATUS, PCIE_CORE_ISR1_MASK_ALL);
	HWRITE4(sc, HOST_CTRL_INT_STATUS, HOST_CTRL_INT_MASK_ALL);

	/* Mask everything except MSI and the summary core interrupt. */
	HWRITE4(sc, PCIE_CORE_ISR0_MASK, PCIE_CORE_ISR0_MASK_ALL &
	    ~PCIE_CORE_ISR0_MASK_MSI_INT);
	HWRITE4(sc, PCIE_CORE_ISR1_MASK, PCIE_CORE_ISR1_MASK_ALL);
	HWRITE4(sc, PCIE_CORE_MSI_MASK, 0);
	HWRITE4(sc, HOST_CTRL_INT_MASK, HOST_CTRL_INT_MASK_ALL &
	    ~HOST_CTRL_INT_MASK_CORE_INT);

	HSET4(sc, PCIE_CORE_CTRL2, PCIE_CORE_CTRL2_OB_WIN_ENABLE);
	HSET4(sc, PIO_CTRL, PIO_CTRL_ADDR_WIN_DISABLE);

	delay(100 * 1000);

	/* Start link training and wait up to 40ms for the link. */
	HSET4(sc, PCIE_CORE_CTRL0, PCIE_CORE_CTRL0_LINK_TRAINING);
	HSET4(sc, PCIE_LINK_CTRL_STAT, PCIE_LINK_CTRL_STAT_LINK_TRAINING);

	for (timo = 40; timo > 0; timo--) {
		if (mvkpcie_link_up(sc))
			break;
		delay(1000);
	}
	if (timo == 0) {
		printf("%s: timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	HWRITE4(sc, PCIE_LINK_CTRL_STAT,
	    PCIE_LINK_CTRL_STAT_LINK_L0S_ENTRY |
	    PCIE_LINK_CTRL_STAT_LINK_WIDTH_1);

	HSET4(sc, PCIE_CMD, PCI_COMMAND_IO_ENABLE |
	    PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE);

	/* Point inbound MSI writes at the doorbell allocated above. */
	HWRITE4(sc, PCIE_CORE_MSI_ADDR_LOW,
	    MVKPCIE_DMA_DVA(sc->sc_msi_addr) & 0xffffffff);
	HWRITE4(sc, PCIE_CORE_MSI_ADDR_HIGH,
	    MVKPCIE_DMA_DVA(sc->sc_msi_addr) >> 32);

	/* Set up address translation for I/O space. */
	sc->sc_io_bus_addr = sc->sc_mem_bus_addr = -1;
	for (i = 0; i < sc->sc_nranges; i++) {
		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
		    sc->sc_ranges[i].size > 0) {
			sc->sc_io_base = sc->sc_ranges[i].phys_base;
			sc->sc_io_bus_addr = sc->sc_ranges[i].pci_base;
			sc->sc_io_size = sc->sc_ranges[i].size;
		}
		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x02000000 &&
		    sc->sc_ranges[i].size > 0) {
			sc->sc_mem_base = sc->sc_ranges[i].phys_base;
			sc->sc_mem_bus_addr = sc->sc_ranges[i].pci_base;
			sc->sc_mem_size = sc->sc_ranges[i].size;
		}
	}

	/* Set up bus range. */
	if (OF_getpropintarray(sc->sc_node, "bus-range", bus_range,
	    sizeof(bus_range)) != sizeof(bus_range) ||
	    bus_range[0] >= 256 || bus_range[1] >= 256) {
		bus_range[0] = 0;
		bus_range[1] = 255;
	}
	sc->sc_bus = bus_range[0];

	/* Initialize command/status. */
	csr = PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_io_size > 0)
		csr |= PCI_COMMAND_IO_ENABLE;
	if (sc->sc_mem_size > 0)
		csr |= PCI_COMMAND_MEM_ENABLE;
	sc->sc_bridge_command = csr;

	/* Initialize bus range. */
	bir = bus_range[0];
	bir |= ((bus_range[0] + 1) << 8);
	bir |= (bus_range[1] << 16);
	sc->sc_bridge_businfo = bir;

	/* Initialize I/O window. */
	iobase = sc->sc_io_bus_addr;
	iolimit = iobase + sc->sc_io_size - 1;
	blr = (iolimit & PPB_IO_MASK) | (PPB_IO_32BIT << PPB_IOLIMIT_SHIFT);
	blr |= ((iobase & PPB_IO_MASK) >> PPB_IO_SHIFT) | PPB_IO_32BIT;
	sc->sc_bridge_iostatus = blr;
	blr = (iobase & 0xffff0000) >> 16;
	blr |= iolimit & 0xffff0000;
	sc->sc_bridge_io_hi = blr;

	/* Initialize memory mapped I/O window. */
	membase = sc->sc_mem_bus_addr;
	memlimit = membase + sc->sc_mem_size - 1;
	blr = memlimit & PPB_MEM_MASK;
	blr |= (membase >> PPB_MEM_SHIFT);
	sc->sc_bridge_mem = blr;

	/* Clone our register tag, redirecting mapping into PCI space. */
	memcpy(&sc->sc_bus_iot, sc->sc_iot, sizeof(sc->sc_bus_iot));
	sc->sc_bus_iot.bus_private = sc;
	sc->sc_bus_iot._space_map = mvkpcie_bs_iomap;
	memcpy(&sc->sc_bus_memt, sc->sc_iot, sizeof(sc->sc_bus_memt));
	sc->sc_bus_memt.bus_private = sc;
	sc->sc_bus_memt._space_map = mvkpcie_bs_memmap;

	sc->sc_pc.pc_conf_v = sc;
	sc->sc_pc.pc_attach_hook = mvkpcie_attach_hook;
	sc->sc_pc.pc_bus_maxdevs = mvkpcie_bus_maxdevs;
	sc->sc_pc.pc_make_tag = mvkpcie_make_tag;
	sc->sc_pc.pc_decompose_tag = mvkpcie_decompose_tag;
	sc->sc_pc.pc_conf_size = mvkpcie_conf_size;
	sc->sc_pc.pc_conf_read = mvkpcie_conf_read;
	sc->sc_pc.pc_conf_write = mvkpcie_conf_write;
	sc->sc_pc.pc_probe_device_hook = mvkpcie_probe_device_hook;

	sc->sc_pc.pc_intr_v = sc;
	sc->sc_pc.pc_intr_map = mvkpcie_intr_map;
	sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
	sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
	sc->sc_pc.pc_intr_string = mvkpcie_intr_string;
	sc->sc_pc.pc_intr_establish = mvkpcie_intr_establish;
	sc->sc_pc.pc_intr_disestablish = mvkpcie_intr_disestablish;

	memset(&pba, 0, sizeof(pba));
	pba.pba_busname = "pci";
	pba.pba_iot = &sc->sc_bus_iot;
	pba.pba_memt = &sc->sc_bus_memt;
	pba.pba_dmat = faa->fa_dmat;
	pba.pba_pc = &sc->sc_pc;
	pba.pba_domain = pci_ndomains++;
	pba.pba_bus = sc->sc_bus;
	pba.pba_flags |= PCI_FLAGS_MSI_ENABLED;

	/* Register the legacy INTx interrupt controller, if present. */
	node = OF_getnodebyname(faa->fa_node, "interrupt-controller");
	if (node) {
		sc->sc_ic.ic_node = node;
		sc->sc_ic.ic_cookie = self;
		sc->sc_ic.ic_establish = mvkpcie_intc_intr_establish;
		sc->sc_ic.ic_disestablish = mvkpcie_intc_intr_disestablish;
		arm_intr_register_fdt(&sc->sc_ic);
	}

	/* Register ourselves as an MSI controller. */
	sc->sc_msi_ic.ic_node = faa->fa_node;
	sc->sc_msi_ic.ic_cookie = self;
	sc->sc_msi_ic.ic_establish_msi = mvkpcie_intc_intr_establish_msi;
	sc->sc_msi_ic.ic_disestablish = mvkpcie_intc_intr_disestablish_msi;
	sc->sc_msi_ic.ic_barrier = mvkpcie_intc_intr_barrier;
	arm_intr_register_fdt(&sc->sc_msi_ic);

	config_found(self, &pba, NULL);
}
558 
559 int
560 mvkpcie_link_up(struct mvkpcie_softc *sc)
561 {
562 	uint32_t reg;
563 
564 	reg = HREAD4(sc, LMI_CFG);
565 	return LMI_CFG_LTSSM_VAL(reg) >= LMI_CFG_LTSSM_L0;
566 }
567 
/*
 * Called when the PCI bus attaches; nothing to do for this bridge.
 */
void
mvkpcie_attach_hook(struct device *parent, struct device *self,
    struct pcibus_attach_args *pba)
{
}
573 
574 int
575 mvkpcie_bus_maxdevs(void *v, int bus)
576 {
577 	struct mvkpcie_softc *sc = v;
578 
579 	if (bus == sc->sc_bus || bus == sc->sc_bus + 1)
580 		return 1;
581 	return 32;
582 }
583 
584 pcitag_t
585 mvkpcie_make_tag(void *v, int bus, int device, int function)
586 {
587 	return ((bus << 20) | (device << 15) | (function << 12));
588 }
589 
590 void
591 mvkpcie_decompose_tag(void *v, pcitag_t tag, int *bp, int *dp, int *fp)
592 {
593 	if (bp != NULL)
594 		*bp = (tag >> 20) & 0xff;
595 	if (dp != NULL)
596 		*dp = (tag >> 15) & 0x1f;
597 	if (fp != NULL)
598 		*fp = (tag >> 12) & 0x7;
599 }
600 
/*
 * All devices get the full PCIe extended configuration space.
 */
int
mvkpcie_conf_size(void *v, pcitag_t tag)
{
	return PCIE_CONFIG_SPACE_SIZE;
}
606 
/*
 * Synthesize config-space reads for the emulated PCI-PCI bridge that
 * represents the root port.  Values come from the hardware ID
 * registers and the shadow registers computed at attach time.
 */
pcireg_t
mvkpcie_conf_read_bridge(struct mvkpcie_softc *sc, int reg)
{
	switch (reg) {
	case PCI_ID_REG:
		/* Marvell vendor ID; device ID from the hardware. */
		return PCI_VENDOR_MARVELL |
		    (HREAD4(sc, PCIE_DEV_ID) & 0xffff0000);
	case PCI_COMMAND_STATUS_REG:
		return sc->sc_bridge_command;
	case PCI_CLASS_REG:
		/* PCI-PCI bridge class; revision from the hardware. */
		return PCI_CLASS_BRIDGE << PCI_CLASS_SHIFT |
		    PCI_SUBCLASS_BRIDGE_PCI << PCI_SUBCLASS_SHIFT |
		    (HREAD4(sc, PCIE_DEV_REV) & 0xff);
	case PCI_BHLC_REG:
		/* Type 1 header, cache line size 0x10. */
		return 1 << PCI_HDRTYPE_SHIFT |
		    0x10 << PCI_CACHELINE_SHIFT;
	case PPB_REG_BUSINFO:
		return sc->sc_bridge_businfo;
	case PPB_REG_IOSTATUS:
		return sc->sc_bridge_iostatus;
	case PPB_REG_MEM:
		return sc->sc_bridge_mem;
	case PPB_REG_IO_HI:
		return sc->sc_bridge_io_hi;
	case PPB_REG_PREFMEM:
	case PPB_REG_PREFBASE_HI32:
	case PPB_REG_PREFLIM_HI32:
	case PPB_REG_BRIDGECONTROL:
		/* No prefetchable window or bridge control emulated. */
		return 0;
	default:
		break;
	}
	return 0;
}
641 
/*
 * Config writes to the emulated bridge are silently discarded; its
 * registers are synthesized in mvkpcie_conf_read_bridge().
 */
void
mvkpcie_conf_write_bridge(struct mvkpcie_softc *sc, int reg, pcireg_t data)
{
	/* Treat emulated bridge registers as read-only. */
}
647 
/*
 * Read PCI config space.  Accesses to the root bus hit the emulated
 * bridge; everything else goes through the hardware PIO engine as a
 * type 0 (directly attached bus) or type 1 (further away) config
 * read.  Returns all-ones on timeout, like a master abort would.
 */
pcireg_t
mvkpcie_conf_read(void *v, pcitag_t tag, int off)
{
	struct mvkpcie_softc *sc = v;
	int bus, dev, fn;
	uint32_t reg;
	int i;

	mvkpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == sc->sc_bus) {
		/* Only the root port itself lives on the root bus. */
		KASSERT(dev == 0);
		return mvkpcie_conf_read_bridge(sc, off);
	}

	/* Stop any previous transfer and clear its completion status. */
	HWRITE4(sc, PIO_START, PIO_START_STOP);
	HWRITE4(sc, PIO_ISR, PIO_ISR_CLEAR);
	reg = HREAD4(sc, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (bus == sc->sc_bus + 1)
		reg |= PIO_CTRL_TYPE_RD0;	/* type 0 config read */
	else
		reg |= PIO_CTRL_TYPE_RD1;	/* type 1 config read */
	HWRITE4(sc, PIO_CTRL, reg);
	/* The tag already encodes bus/dev/fn in config-address layout. */
	HWRITE4(sc, PIO_ADDR_LS, tag | off);
	HWRITE4(sc, PIO_ADDR_MS, 0);
	HWRITE4(sc, PIO_WR_DATA_STRB, PIO_WR_DATA_STRB_VALUE);
	HWRITE4(sc, PIO_START, PIO_START_START);

	/* Poll for completion: START clears and the ISR latches. */
	for (i = 500; i > 0; i--) {
		if (HREAD4(sc, PIO_START) == 0 &&
		    HREAD4(sc, PIO_ISR) != 0)
			break;
		delay(2);
	}
	if (i == 0) {
		printf("%s: timeout\n", sc->sc_dev.dv_xname);
		return 0xffffffff;
	}

	return HREAD4(sc, PIO_RD_DATA);
}
689 
/*
 * Write PCI config space.  Mirrors mvkpcie_conf_read(): root-bus
 * accesses go to the (read-only) emulated bridge, all others through
 * the PIO engine as type 0 or type 1 config writes.
 */
void
mvkpcie_conf_write(void *v, pcitag_t tag, int off, pcireg_t data)
{
	struct mvkpcie_softc *sc = v;
	int bus, dev, fn;
	uint32_t reg;
	int i;

	mvkpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == sc->sc_bus) {
		/* Only the root port itself lives on the root bus. */
		KASSERT(dev == 0);
		mvkpcie_conf_write_bridge(sc, off, data);
		return;
	}

	/* Stop any previous transfer and clear its completion status. */
	HWRITE4(sc, PIO_START, PIO_START_STOP);
	HWRITE4(sc, PIO_ISR, PIO_ISR_CLEAR);
	reg = HREAD4(sc, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (bus == sc->sc_bus + 1)
		reg |= PIO_CTRL_TYPE_WR0;	/* type 0 config write */
	else
		reg |= PIO_CTRL_TYPE_WR1;	/* type 1 config write */
	HWRITE4(sc, PIO_CTRL, reg);
	HWRITE4(sc, PIO_ADDR_LS, tag | off);
	HWRITE4(sc, PIO_ADDR_MS, 0);
	HWRITE4(sc, PIO_WR_DATA, data);
	HWRITE4(sc, PIO_WR_DATA_STRB, PIO_WR_DATA_STRB_VALUE);
	HWRITE4(sc, PIO_START, PIO_START_START);

	/* Poll for completion: START clears and the ISR latches. */
	for (i = 500; i > 0; i--) {
		if (HREAD4(sc, PIO_START) == 0 &&
		    HREAD4(sc, PIO_ISR) != 0)
			break;
		delay(2);
	}
	if (i == 0) {
		printf("%s: timeout\n", sc->sc_dev.dv_xname);
		return;
	}
}
731 
/*
 * Per-device probe hook; nothing to fix up on this controller.
 */
int
mvkpcie_probe_device_hook(void *v, struct pci_attach_args *pa)
{
	return 0;
}
737 
738 int
739 mvkpcie_intr_map(struct pci_attach_args *pa, pci_intr_handle_t *ihp)
740 {
741 	int pin = pa->pa_rawintrpin;
742 
743 	if (pin == 0 || pin > PCI_INTERRUPT_PIN_MAX)
744 		return -1;
745 
746 	if (pa->pa_tag == 0)
747 		return -1;
748 
749 	ihp->ih_pc = pa->pa_pc;
750 	ihp->ih_tag = pa->pa_intrtag;
751 	ihp->ih_intrpin = pa->pa_intrpin;
752 	ihp->ih_type = PCI_INTX;
753 
754 	return 0;
755 }
756 
757 const char *
758 mvkpcie_intr_string(void *v, pci_intr_handle_t ih)
759 {
760 	switch (ih.ih_type) {
761 	case PCI_MSI:
762 		return "msi";
763 	case PCI_MSIX:
764 		return "msix";
765 	}
766 
767 	return "intx";
768 }
769 
/*
 * Establish a PCI interrupt.  MSI/MSI-X vectors are allocated through
 * the FDT MSI glue and programmed into the device; legacy INTx
 * interrupts are routed through the controller node's interrupt map.
 */
void *
mvkpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct mvkpcie_softc *sc = v;
	void *cookie;

	KASSERT(ih.ih_type != PCI_NONE);

	if (ih.ih_type != PCI_INTX) {
		uint64_t addr, data;

		/* Assume hardware passes Requester ID as sideband data. */
		data = pci_requester_id(ih.ih_pc, ih.ih_tag);
		cookie = fdt_intr_establish_msi_cpu(sc->sc_node, &addr,
		    &data, level, ci, func, arg, (void *)name);
		if (cookie == NULL)
			return NULL;

		/* TODO: translate address to the PCI device's view */

		if (ih.ih_type == PCI_MSIX) {
			pci_msix_enable(ih.ih_pc, ih.ih_tag,
			    &sc->sc_bus_memt, ih.ih_intrpin, addr, data);
		} else
			pci_msi_enable(ih.ih_pc, ih.ih_tag, addr, data);
	} else {
		int bus, dev, fn;
		uint32_t reg[4];

		mvkpcie_decompose_tag(sc, ih.ih_tag, &bus, &dev, &fn);

		/* Interrupt-map lookup key: config address, then pin. */
		reg[0] = bus << 16 | dev << 11 | fn << 8;
		reg[1] = reg[2] = 0;
		reg[3] = ih.ih_intrpin;

		cookie = fdt_intr_establish_imap_cpu(sc->sc_node, reg,
		    sizeof(reg), level, ci, func, arg, name);
	}

	return cookie;
}
812 
/*
 * Tearing down PCI interrupts is not supported; any attempt panics.
 */
void
mvkpcie_intr_disestablish(void *v, void *cookie)
{
	panic("%s", __func__);
}
818 
819 int
820 mvkpcie_bs_iomap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
821     int flags, bus_space_handle_t *bshp)
822 {
823 	struct mvkpcie_softc *sc = t->bus_private;
824 	int i;
825 
826 	for (i = 0; i < sc->sc_nranges; i++) {
827 		uint64_t pci_start = sc->sc_ranges[i].pci_base;
828 		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
829 		uint64_t phys_start = sc->sc_ranges[i].phys_base;
830 
831 		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
832 		    addr >= pci_start && addr + size <= pci_end) {
833 			return bus_space_map(sc->sc_iot,
834 			    addr - pci_start + phys_start, size, flags, bshp);
835 		}
836 	}
837 
838 	return ENXIO;
839 }
840 
841 int
842 mvkpcie_bs_memmap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
843     int flags, bus_space_handle_t *bshp)
844 {
845 	struct mvkpcie_softc *sc = t->bus_private;
846 	int i;
847 
848 	for (i = 0; i < sc->sc_nranges; i++) {
849 		uint64_t pci_start = sc->sc_ranges[i].pci_base;
850 		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
851 		uint64_t phys_start = sc->sc_ranges[i].phys_base;
852 
853 		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x02000000 &&
854 		    addr >= pci_start && addr + size <= pci_end) {
855 			return bus_space_map(sc->sc_iot,
856 			    addr - pci_start + phys_start, size, flags, bshp);
857 		}
858 	}
859 
860 	return ENXIO;
861 }
862 
/*
 * Shared handler for the controller's summary interrupt.  Dispatches
 * pending MSI events (vector number taken from the payload register)
 * and the four legacy INTx sources to their registered handlers, each
 * run at its own IPL.  Returns 0 if the interrupt was not ours.
 */
int
mvkpcie_intc_intr(void *cookie)
{
	struct mvkpcie_softc *sc = (struct mvkpcie_softc *)cookie;
	struct intrhand *ih;
	uint32_t pending;
	int i, s;

	/* Not our interrupt if the core summary bit isn't set. */
	if (!(HREAD4(sc, HOST_CTRL_INT_STATUS) & HOST_CTRL_INT_MASK_CORE_INT))
		return 0;

	if (HREAD4(sc, PCIE_CORE_ISR0_STATUS) & PCIE_CORE_ISR0_MASK_MSI_INT) {
		pending = HREAD4(sc, PCIE_CORE_MSI_STATUS);
		while (pending) {
			i = ffs(pending) - 1;
			/* Ack this status bit before reading the payload. */
			HWRITE4(sc, PCIE_CORE_MSI_STATUS, (1 << i));
			pending &= ~(1 << i);

			/* The payload register holds the vector number. */
			i = HREAD4(sc, PCIE_CORE_MSI_PAYLOAD) & 0xff;
			if ((ih = sc->sc_msi_handlers[i]) != NULL) {
				s = splraise(ih->ih_ipl);
				if (ih->ih_func(ih->ih_arg))
					ih->ih_count.ec_count++;
				splx(s);
			}
		}
		HWRITE4(sc, PCIE_CORE_ISR0_STATUS, PCIE_CORE_ISR0_MASK_MSI_INT);
	}

	/* Dispatch any pending INTx sources, then ack them all. */
	pending = HREAD4(sc, PCIE_CORE_ISR1_STATUS);
	for (i = 0; i < nitems(sc->sc_intx_handlers); i++) {
		if (pending & PCIE_CORE_ISR1_MASK_INTX(i)) {
			if ((ih = sc->sc_intx_handlers[i]) != NULL) {
				s = splraise(ih->ih_ipl);
				if (ih->ih_func(ih->ih_arg))
					ih->ih_count.ec_count++;
				splx(s);
			}
		}
	}
	HWRITE4(sc, PCIE_CORE_ISR1_STATUS, pending);

	/* Finally ack the summary interrupt itself. */
	HWRITE4(sc, HOST_CTRL_INT_STATUS, HOST_CTRL_INT_MASK_CORE_INT);
	return 1;
}
908 
909 void *
910 mvkpcie_intc_intr_establish(void *cookie, int *cell, int level,
911     struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
912 {
913 	struct mvkpcie_softc *sc = (struct mvkpcie_softc *)cookie;
914 	struct intrhand *ih;
915 	int irq = cell[0];
916 	int s;
917 
918 	if (ci != NULL && !CPU_IS_PRIMARY(ci))
919 		return NULL;
920 
921 	if (irq < 0 || irq > nitems(sc->sc_intx_handlers))
922 		return NULL;
923 
924 	/* Don't allow shared interrupts for now. */
925 	if (sc->sc_intx_handlers[irq])
926 		return NULL;
927 
928 	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
929 	ih->ih_func = func;
930 	ih->ih_arg = arg;
931 	ih->ih_ipl = level & IPL_IRQMASK;
932 	ih->ih_irq = irq;
933 	ih->ih_name = name;
934 	ih->ih_sc = sc;
935 
936 	s = splhigh();
937 
938 	sc->sc_intx_handlers[irq] = ih;
939 
940 	if (name != NULL)
941 		evcount_attach(&ih->ih_count, name, &ih->ih_irq);
942 
943 	mvkpcie_intc_recalc_ipl(sc);
944 
945 	splx(s);
946 
947 	HCLR4(sc, PCIE_CORE_ISR1_MASK, PCIE_CORE_ISR1_MASK_INTX(irq));
948 
949 	return (ih);
950 }
951 
/*
 * Remove an INTx handler: mask the line, unhook and free the handler
 * and recompute the parent interrupt's IPL.
 */
void
mvkpcie_intc_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct mvkpcie_softc *sc = ih->ih_sc;
	int s;

	/* Mask the line before tearing the handler down. */
	HSET4(sc, PCIE_CORE_ISR1_MASK, PCIE_CORE_ISR1_MASK_INTX(ih->ih_irq));

	s = splhigh();

	sc->sc_intx_handlers[ih->ih_irq] = NULL;
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);
	free(ih, M_DEVBUF, sizeof(*ih));

	mvkpcie_intc_recalc_ipl(sc);

	splx(s);
}
972 
/*
 * Allocate an MSI vector: pick the first free slot in
 * sc_msi_handlers, install the handler and hand back the doorbell
 * DMA address plus the vector number as the message data.
 */
void *
mvkpcie_intc_intr_establish_msi(void *cookie, uint64_t *addr, uint64_t *data,
    int level, struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct mvkpcie_softc *sc = (struct mvkpcie_softc *)cookie;
	struct intrhand *ih;
	int i, s;

	/* Interrupts are only handled on the primary CPU. */
	if (ci != NULL && !CPU_IS_PRIMARY(ci))
		return NULL;

	/* Find a free MSI vector. */
	for (i = 0; i < nitems(sc->sc_msi_handlers); i++) {
		if (sc->sc_msi_handlers[i] == NULL)
			break;
	}

	if (i == nitems(sc->sc_msi_handlers))
		return NULL;

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_irq = i;
	ih->ih_name = name;
	ih->ih_sc = sc;

	s = splhigh();

	sc->sc_msi_handlers[i] = ih;

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

	mvkpcie_intc_recalc_ipl(sc);

	/* The device writes the vector number to the MSI doorbell. */
	*addr = MVKPCIE_DMA_DVA(sc->sc_msi_addr);
	*data = i;

	splx(s);
	return (ih);
}
1015 
/*
 * Release an MSI vector and recompute the parent interrupt's IPL.
 */
void
mvkpcie_intc_intr_disestablish_msi(void *cookie)
{
	struct intrhand *ih = cookie;
	struct mvkpcie_softc *sc = ih->ih_sc;
	int s;

	s = splhigh();

	sc->sc_msi_handlers[ih->ih_irq] = NULL;
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);
	free(ih, M_DEVBUF, sizeof(*ih));

	mvkpcie_intc_recalc_ipl(sc);

	splx(s);
}
1034 
/*
 * Wait for any in-flight invocation of the shared parent interrupt
 * handler to finish; all INTx and MSI handlers run from sc_ih.
 */
void
mvkpcie_intc_intr_barrier(void *cookie)
{
	struct intrhand *ih = cookie;
	struct mvkpcie_softc *sc = ih->ih_sc;

	intr_barrier(sc->sc_ih);
}
1043 
1044 void
1045 mvkpcie_intc_recalc_ipl(struct mvkpcie_softc *sc)
1046 {
1047 	struct intrhand *ih;
1048 	int max = IPL_NONE;
1049 	int min = IPL_HIGH;
1050 	int irq;
1051 
1052 	for (irq = 0; irq < nitems(sc->sc_intx_handlers); irq++) {
1053 		ih = sc->sc_intx_handlers[irq];
1054 		if (ih == NULL)
1055 			continue;
1056 
1057 		if (ih->ih_ipl > max)
1058 			max = ih->ih_ipl;
1059 
1060 		if (ih->ih_ipl < min)
1061 			min = ih->ih_ipl;
1062 	}
1063 
1064 	for (irq = 0; irq < nitems(sc->sc_msi_handlers); irq++) {
1065 		ih = sc->sc_msi_handlers[irq];
1066 		if (ih == NULL)
1067 			continue;
1068 
1069 		if (ih->ih_ipl > max)
1070 			max = ih->ih_ipl;
1071 
1072 		if (ih->ih_ipl < min)
1073 			min = ih->ih_ipl;
1074 	}
1075 
1076 	if (max == IPL_NONE)
1077 		min = IPL_NONE;
1078 
1079 	if (sc->sc_ipl != max) {
1080 		sc->sc_ipl = max;
1081 
1082 		if (sc->sc_ih != NULL)
1083 			fdt_intr_disestablish(sc->sc_ih);
1084 
1085 		if (sc->sc_ipl != IPL_NONE)
1086 			sc->sc_ih = fdt_intr_establish(sc->sc_node, sc->sc_ipl,
1087 			    mvkpcie_intc_intr, sc, sc->sc_dev.dv_xname);
1088 	}
1089 }
1090 
/*
 * Allocate a small DMA-coherent, zeroed buffer and map it into kernel
 * virtual space.  Currently only used for the 16-bit MSI doorbell
 * address.  Returns NULL on failure with all intermediate resources
 * released via the goto-cleanup chain below.
 */
struct mvkpcie_dmamem *
mvkpcie_dmamem_alloc(struct mvkpcie_softc *sc, bus_size_t size, bus_size_t align)
{
	struct mvkpcie_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	/* One physically contiguous segment with the requested alignment. */
	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(mdm->mdm_kva, size);

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, sizeof(*mdm));

	return (NULL);
}
1132 
/*
 * Free a buffer obtained from mvkpcie_dmamem_alloc(), tearing the
 * mapping down in reverse order of construction.
 */
void
mvkpcie_dmamem_free(struct mvkpcie_softc *sc, struct mvkpcie_dmamem *mdm)
{
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, sizeof(*mdm));
}
1141