xref: /openbsd-src/sys/dev/fdt/mvkpcie.c (revision 46035553bfdd96e63c94e32da0210227ec2e3cf1)
1 /*	$OpenBSD: mvkpcie.c,v 1.7 2020/07/17 08:07:34 patrick Exp $	*/
2 /*
3  * Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
4  * Copyright (c) 2020 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/device.h>
22 #include <sys/extent.h>
23 #include <sys/malloc.h>
24 #include <sys/evcount.h>
25 
26 #include <machine/intr.h>
27 #include <machine/bus.h>
28 #include <machine/fdt.h>
29 
30 #include <dev/pci/pcidevs.h>
31 #include <dev/pci/pcireg.h>
32 #include <dev/pci/pcivar.h>
33 #include <dev/pci/ppbreg.h>
34 
35 #include <dev/ofw/openfirm.h>
36 #include <dev/ofw/ofw_clock.h>
37 #include <dev/ofw/ofw_gpio.h>
38 #include <dev/ofw/ofw_misc.h>
39 #include <dev/ofw/ofw_pinctrl.h>
40 #include <dev/ofw/ofw_power.h>
41 #include <dev/ofw/fdt.h>
42 
43 /* Registers */
44 #define PCIE_DEV_ID			0x0000
45 #define PCIE_CMD			0x0004
46 #define PCIE_DEV_REV			0x0008
47 #define PCIE_DEV_CTRL_STATS		0x00c8
48 #define  PCIE_DEV_CTRL_STATS_SNOOP		(1 << 1)
49 #define  PCIE_DEV_CTRL_STATS_RELAX_ORDER	(1 << 4)
50 #define  PCIE_DEV_CTRL_STATS_MAX_PAYLOAD_7	(0x7 << 5)
51 #define  PCIE_DEV_CTRL_STATS_MAX_RD_REQ_SZ	(0x2 << 12)
52 #define PCIE_LINK_CTRL_STAT		0x00d0
53 #define  PCIE_LINK_CTRL_STAT_LINK_L0S_ENTRY	(1 << 0)
54 #define  PCIE_LINK_CTRL_STAT_LINK_TRAINING	(1 << 5)
55 #define  PCIE_LINK_CTRL_STAT_LINK_WIDTH_1	(1 << 20)
56 #define PCIE_ERR_CAPCTL			0x0118
57 #define  PCIE_ERR_CAPCTL_ECRC_CHK_TX		(1 << 5)
58 #define  PCIE_ERR_CAPCTL_ECRC_CHK_TX_EN		(1 << 6)
59 #define  PCIE_ERR_CAPCTL_ECRC_CHCK		(1 << 7)
60 #define  PCIE_ERR_CAPCTL_ECRC_CHCK_RCV		(1 << 8)
61 #define PIO_CTRL			0x4000
62 #define  PIO_CTRL_TYPE_MASK			(0xf << 0)
63 #define  PIO_CTRL_TYPE_RD0			(0x8 << 0)
64 #define  PIO_CTRL_TYPE_RD1			(0x9 << 0)
65 #define  PIO_CTRL_TYPE_WR0			(0xa << 0)
66 #define  PIO_CTRL_TYPE_WR1			(0xb << 0)
67 #define  PIO_CTRL_ADDR_WIN_DISABLE		(1 << 24)
68 #define PIO_STAT			0x4004
69 #define  PIO_STAT_COMP_STATUS			(0x7 << 7)
70 #define PIO_ADDR_LS			0x4008
71 #define PIO_ADDR_MS			0x400c
72 #define PIO_WR_DATA			0x4010
73 #define PIO_WR_DATA_STRB		0x4014
74 #define  PIO_WR_DATA_STRB_VALUE			0xf
75 #define PIO_RD_DATA			0x4018
76 #define PIO_START			0x401c
77 #define  PIO_START_STOP				(0 << 0)
78 #define  PIO_START_START			(1 << 0)
79 #define PIO_ISR				0x4020
80 #define  PIO_ISR_CLEAR				(1 << 0)
81 #define PIO_ISRM			0x4024
82 #define PCIE_CORE_CTRL0			0x4800
83 #define  PCIE_CORE_CTRL0_GEN_1			(0 << 0)
84 #define  PCIE_CORE_CTRL0_GEN_2			(1 << 0)
85 #define  PCIE_CORE_CTRL0_GEN_3			(2 << 0)
86 #define  PCIE_CORE_CTRL0_GEN_MASK		(0x3 << 0)
87 #define  PCIE_CORE_CTRL0_IS_RC			(1 << 2)
88 #define  PCIE_CORE_CTRL0_LANE_1			(0 << 3)
89 #define  PCIE_CORE_CTRL0_LANE_2			(1 << 3)
90 #define  PCIE_CORE_CTRL0_LANE_4			(2 << 3)
91 #define  PCIE_CORE_CTRL0_LANE_8			(3 << 3)
92 #define  PCIE_CORE_CTRL0_LANE_MASK		(0x3 << 3)
93 #define  PCIE_CORE_CTRL0_LINK_TRAINING		(1 << 6)
94 #define PCIE_CORE_CTRL2			0x4808
95 #define  PCIE_CORE_CTRL2_RESERVED		(0x7 << 0)
96 #define  PCIE_CORE_CTRL2_TD_ENABLE		(1 << 4)
97 #define  PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE	(1 << 5)
98 #define  PCIE_CORE_CTRL2_OB_WIN_ENABLE		(1 << 6)
99 #define  PCIE_CORE_CTRL2_MSI_ENABLE		(1 << 10)
100 #define PCIE_CORE_ISR0_STATUS		0x4840
101 #define PCIE_CORE_ISR0_MASK		0x4844
102 #define  PCIE_CORE_ISR0_MASK_MSI_INT		(1 << 24)
103 #define  PCIE_CORE_ISR0_MASK_ALL		0x07ffffff
104 #define PCIE_CORE_ISR1_STATUS		0x4848
105 #define PCIE_CORE_ISR1_MASK		0x484c
106 #define  PCIE_CORE_ISR1_MASK_ALL		0x00000ff0
107 #define  PCIE_CORE_ISR1_MASK_INTX(x)		(1 << (x + 8))
108 #define PCIE_CORE_MSI_ADDR_LOW		0x4850
109 #define PCIE_CORE_MSI_ADDR_HIGH		0x4854
110 #define PCIE_CORE_MSI_STATUS		0x4858
111 #define PCIE_CORE_MSI_MASK		0x485c
112 #define PCIE_CORE_MSI_PAYLOAD		0x489c
113 #define LMI_CFG				0x6000
114 #define  LMI_CFG_LTSSM_VAL(x)			(((x) >> 24) & 0x3f)
115 #define  LMI_CFG_LTSSM_L0			0x10
116 #define CTRL_CORE_CONFIG		0x18000
117 #define  CTRL_CORE_CONFIG_MODE_DIRECT		(0 << 0)
118 #define  CTRL_CORE_CONFIG_MODE_COMMAND		(1 << 0)
119 #define  CTRL_CORE_CONFIG_MODE_MASK		(1 << 0)
120 #define HOST_CTRL_INT_STATUS		0x1b000
121 #define HOST_CTRL_INT_MASK		0x1b004
122 #define  HOST_CTRL_INT_MASK_CORE_INT		(1 << 16)
123 #define  HOST_CTRL_INT_MASK_ALL			0xfff0fb
124 
/*
 * Controller register access helpers; all controller registers are
 * 32-bit and live in the single mapping made at attach time.
 */
#define HREAD4(sc, reg)							\
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define HSET4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
#define HCLR4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))
133 
/*
 * A small single-segment DMA buffer; used as the MSI doorbell the
 * endpoint devices write their MSI payloads to.
 */
struct mvkpcie_dmamem {
	bus_dmamap_t		mdm_map;	/* loaded DMA map */
	bus_dma_segment_t	mdm_seg;	/* backing DMA segment */
	size_t			mdm_size;	/* size of the buffer */
	caddr_t			mdm_kva;	/* kernel virtual mapping */
};

/* Accessors for a struct mvkpcie_dmamem. */
#define MVKPCIE_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MVKPCIE_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MVKPCIE_DMA_DVA(_mdm)	((uint64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MVKPCIE_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
145 
/* State for one established MSI handler. */
struct intrhand {
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_irq;			/* IRQ number */
	struct evcount	ih_count;	/* interrupt statistics */
	char *ih_name;			/* name for evcount(9) */
	void *ih_sc;			/* back pointer to the softc */
};
155 
/* One decoded FDT "ranges" entry: a PCI-to-CPU address window. */
struct mvkpcie_range {
	uint32_t		flags;		/* type bits from the entry */
	uint64_t		pci_base;	/* address on the PCI bus */
	uint64_t		phys_base;	/* corresponding CPU address */
	uint64_t		size;		/* window size in bytes */
};
162 
/* Per-controller driver state. */
struct mvkpcie_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;

	/* I/O and memory windows extracted from the "ranges" property. */
	bus_addr_t		sc_io_base;
	bus_addr_t		sc_io_bus_addr;
	bus_size_t		sc_io_size;
	bus_addr_t		sc_mem_base;
	bus_addr_t		sc_mem_bus_addr;
	bus_size_t		sc_mem_size;

	/* Our FDT node and address/size cell geometry. */
	int			sc_node;
	int			sc_acells;
	int			sc_scells;
	int			sc_pacells;
	int			sc_pscells;
	struct mvkpcie_range	*sc_ranges;
	int			sc_nranges;

	/* Bus space tags handed down to child PCI devices. */
	struct bus_space	sc_bus_iot;
	struct bus_space	sc_bus_memt;

	struct arm64_pci_chipset sc_pc;
	int			sc_bus;		/* root bus number */

	/* Shadow registers of the emulated host bridge. */
	uint32_t		sc_bridge_command;
	uint32_t		sc_bridge_businfo;
	uint32_t		sc_bridge_iostatus;
	uint32_t		sc_bridge_io_hi;
	uint32_t		sc_bridge_mem;

	/* MSI controller state. */
	struct interrupt_controller sc_msi_ic;
	struct intrhand		*sc_msi_handlers[32];
	struct mvkpcie_dmamem	*sc_msi_addr;	/* MSI doorbell */
	void			*sc_ih;		/* parent interrupt handle */
	int			sc_ipl;		/* IPL sc_ih was hooked at */
};
202 
203 int mvkpcie_match(struct device *, void *, void *);
204 void mvkpcie_attach(struct device *, struct device *, void *);
205 
/* Autoconf glue. */
struct cfattach mvkpcie_ca = {
	sizeof (struct mvkpcie_softc), mvkpcie_match, mvkpcie_attach
};

struct cfdriver mvkpcie_cd = {
	NULL, "mvkpcie", DV_DULL
};
213 
214 int
215 mvkpcie_match(struct device *parent, void *match, void *aux)
216 {
217 	struct fdt_attach_args *faa = aux;
218 
219 	return OF_is_compatible(faa->fa_node, "marvell,armada-3700-pcie");
220 }
221 
222 int	mvkpcie_link_up(struct mvkpcie_softc *);
223 
224 void	mvkpcie_attach_hook(struct device *, struct device *,
225 	    struct pcibus_attach_args *);
226 int	mvkpcie_bus_maxdevs(void *, int);
227 pcitag_t mvkpcie_make_tag(void *, int, int, int);
228 void	mvkpcie_decompose_tag(void *, pcitag_t, int *, int *, int *);
229 int	mvkpcie_conf_size(void *, pcitag_t);
230 pcireg_t mvkpcie_conf_read(void *, pcitag_t, int);
231 void	mvkpcie_conf_write(void *, pcitag_t, int, pcireg_t);
232 
233 int	mvkpcie_intr_map(struct pci_attach_args *, pci_intr_handle_t *);
234 const char *mvkpcie_intr_string(void *, pci_intr_handle_t);
235 void	*mvkpcie_intr_establish(void *, pci_intr_handle_t, int,
236 	    struct cpu_info *, int (*)(void *), void *, char *);
237 void	mvkpcie_intr_disestablish(void *, void *);
238 
239 int	mvkpcie_bs_iomap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
240 	    bus_space_handle_t *);
241 int	mvkpcie_bs_memmap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
242 	    bus_space_handle_t *);
243 
244 int	mvkpcie_intc_intr(void *);
245 void	*mvkpcie_intc_intr_establish_msi(void *, uint64_t *, uint64_t *,
246 	    int , struct cpu_info *, int (*)(void *), void *, char *);
247 void	mvkpcie_intc_intr_disestablish_msi(void *);
248 void	mvkpcie_intc_intr_barrier(void *);
249 void	mvkpcie_intc_recalc_ipl(struct mvkpcie_softc *);
250 
251 struct mvkpcie_dmamem *mvkpcie_dmamem_alloc(struct mvkpcie_softc *, bus_size_t,
252 	    bus_size_t);
253 void	mvkpcie_dmamem_free(struct mvkpcie_softc *, struct mvkpcie_dmamem *);
254 
/*
 * Attach: parse the FDT node, map the controller registers, bring the
 * PCIe link up and attach the PCI bus.  The host bridge is not visible
 * in config space and is emulated in software; its shadow registers
 * are initialized here (see mvkpcie_conf_read_bridge()).
 */
void
mvkpcie_attach(struct device *parent, struct device *self, void *aux)
{
	struct mvkpcie_softc *sc = (struct mvkpcie_softc *)self;
	struct fdt_attach_args *faa = aux;
	struct pcibus_attach_args pba;
	uint32_t *reset_gpio;
	ssize_t reset_gpiolen;
	bus_addr_t iobase, iolimit;
	bus_addr_t membase, memlimit;
	uint32_t bus_range[2];
	uint32_t *ranges;
	int i, j, nranges, rangeslen;
	pcireg_t csr, bir, blr;
	uint32_t reg;
	int timo;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;
	sc->sc_node = faa->fa_node;

	/* Our cell counts, defaulting to the parent's. */
	sc->sc_acells = OF_getpropint(sc->sc_node, "#address-cells",
	    faa->fa_acells);
	sc->sc_scells = OF_getpropint(sc->sc_node, "#size-cells",
	    faa->fa_scells);
	sc->sc_pacells = faa->fa_acells;
	sc->sc_pscells = faa->fa_scells;

	/* "ranges" must be a whole number of (child, parent, size) tuples. */
	rangeslen = OF_getproplen(sc->sc_node, "ranges");
	if (rangeslen <= 0 || (rangeslen % sizeof(uint32_t)) ||
	     (rangeslen / sizeof(uint32_t)) % (sc->sc_acells +
	     sc->sc_pacells + sc->sc_scells)) {
		printf(": invalid ranges property\n");
		return;
	}

	/* Allocate the MSI doorbell the endpoints will write to. */
	sc->sc_msi_addr = mvkpcie_dmamem_alloc(sc, sizeof(uint16_t),
	    sizeof(uint64_t));
	if (sc->sc_msi_addr == NULL) {
		printf(": cannot allocate MSI address\n");
		return;
	}

	ranges = malloc(rangeslen, M_TEMP, M_WAITOK);
	OF_getpropintarray(sc->sc_node, "ranges", ranges,
	    rangeslen);

	nranges = (rangeslen / sizeof(uint32_t)) /
	    (sc->sc_acells + sc->sc_pacells + sc->sc_scells);
	sc->sc_ranges = mallocarray(nranges,
	    sizeof(struct mvkpcie_range), M_TEMP, M_WAITOK);
	sc->sc_nranges = nranges;

	/*
	 * Decode each ranges tuple; the first child address cell holds
	 * the flags, the remaining cell(s) the PCI address.
	 */
	for (i = 0, j = 0; i < sc->sc_nranges; i++) {
		sc->sc_ranges[i].flags = ranges[j++];
		sc->sc_ranges[i].pci_base = ranges[j++];
		if (sc->sc_acells - 1 == 2) {
			sc->sc_ranges[i].pci_base <<= 32;
			sc->sc_ranges[i].pci_base |= ranges[j++];
		}
		sc->sc_ranges[i].phys_base = ranges[j++];
		if (sc->sc_pacells == 2) {
			sc->sc_ranges[i].phys_base <<= 32;
			sc->sc_ranges[i].phys_base |= ranges[j++];
		}
		sc->sc_ranges[i].size = ranges[j++];
		if (sc->sc_scells == 2) {
			sc->sc_ranges[i].size <<= 32;
			sc->sc_ranges[i].size |= ranges[j++];
		}
	}

	free(ranges, M_TEMP, rangeslen);

	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
		    sizeof(struct mvkpcie_range));
		printf(": can't map ctrl registers\n");
		return;
	}

	printf("\n");

	pinctrl_byname(sc->sc_node, "default");

	clock_set_assigned(sc->sc_node);
	clock_enable_all(sc->sc_node);

	reset_gpiolen = OF_getproplen(sc->sc_node, "reset-gpios");
	if (reset_gpiolen > 0) {
		/* Link training needs to be disabled during PCIe reset. */
		HCLR4(sc, PCIE_CORE_CTRL0, PCIE_CORE_CTRL0_LINK_TRAINING);

		reset_gpio = malloc(reset_gpiolen, M_TEMP, M_WAITOK);
		OF_getpropintarray(sc->sc_node, "reset-gpios", reset_gpio,
		    reset_gpiolen);

		/* Issue PCIe reset. */
		gpio_controller_config_pin(reset_gpio, GPIO_CONFIG_OUTPUT);
		gpio_controller_set_pin(reset_gpio, 1);
		delay(10000);
		gpio_controller_set_pin(reset_gpio, 0);

		free(reset_gpio, M_TEMP, reset_gpiolen);
	}

	/* Put the controller in direct (root complex) mode. */
	reg = HREAD4(sc, CTRL_CORE_CONFIG);
	reg &= ~CTRL_CORE_CONFIG_MODE_MASK;
	reg |= CTRL_CORE_CONFIG_MODE_DIRECT;
	HWRITE4(sc, CTRL_CORE_CONFIG, reg);

	HSET4(sc, PCIE_CORE_CTRL0, PCIE_CORE_CTRL0_IS_RC);

	/* Enable ECRC generation and checking. */
	HWRITE4(sc, PCIE_ERR_CAPCTL,
	    PCIE_ERR_CAPCTL_ECRC_CHK_TX |
	    PCIE_ERR_CAPCTL_ECRC_CHK_TX_EN |
	    PCIE_ERR_CAPCTL_ECRC_CHCK |
	    PCIE_ERR_CAPCTL_ECRC_CHCK_RCV);

	HWRITE4(sc, PCIE_DEV_CTRL_STATS,
	    PCIE_DEV_CTRL_STATS_MAX_PAYLOAD_7 |
	    PCIE_DEV_CTRL_STATS_MAX_RD_REQ_SZ);

	HWRITE4(sc, PCIE_CORE_CTRL2,
	    PCIE_CORE_CTRL2_RESERVED |
	    PCIE_CORE_CTRL2_TD_ENABLE);

	/* Advertise Gen 2, one lane. */
	reg = HREAD4(sc, PCIE_CORE_CTRL0);
	reg &= ~PCIE_CORE_CTRL0_GEN_MASK;
	reg |= PCIE_CORE_CTRL0_GEN_2;
	HWRITE4(sc, PCIE_CORE_CTRL0, reg);

	reg = HREAD4(sc, PCIE_CORE_CTRL0);
	reg &= ~PCIE_CORE_CTRL0_LANE_MASK;
	reg |= PCIE_CORE_CTRL0_LANE_1;
	HWRITE4(sc, PCIE_CORE_CTRL0, reg);

	HSET4(sc, PCIE_CORE_CTRL2, PCIE_CORE_CTRL2_MSI_ENABLE);

	/* Clear all pending interrupt status, then unmask what we use. */
	HWRITE4(sc, PCIE_CORE_ISR0_STATUS, PCIE_CORE_ISR0_MASK_ALL);
	HWRITE4(sc, PCIE_CORE_ISR1_STATUS, PCIE_CORE_ISR1_MASK_ALL);
	HWRITE4(sc, HOST_CTRL_INT_STATUS, HOST_CTRL_INT_MASK_ALL);

	HWRITE4(sc, PCIE_CORE_ISR0_MASK, PCIE_CORE_ISR0_MASK_ALL &
	    ~PCIE_CORE_ISR0_MASK_MSI_INT);
	HWRITE4(sc, PCIE_CORE_ISR1_MASK, PCIE_CORE_ISR1_MASK_ALL);
	HWRITE4(sc, PCIE_CORE_MSI_MASK, 0);
	HWRITE4(sc, HOST_CTRL_INT_MASK, HOST_CTRL_INT_MASK_ALL &
	    ~HOST_CTRL_INT_MASK_CORE_INT);

	HSET4(sc, PCIE_CORE_CTRL2, PCIE_CORE_CTRL2_OB_WIN_ENABLE);
	HSET4(sc, PIO_CTRL, PIO_CTRL_ADDR_WIN_DISABLE);

	delay(100 * 1000);

	/* Start link training and wait (up to 40ms) for the link. */
	HSET4(sc, PCIE_CORE_CTRL0, PCIE_CORE_CTRL0_LINK_TRAINING);
	HSET4(sc, PCIE_LINK_CTRL_STAT, PCIE_LINK_CTRL_STAT_LINK_TRAINING);

	for (timo = 40; timo > 0; timo--) {
		if (mvkpcie_link_up(sc))
			break;
		delay(1000);
	}
	if (timo == 0) {
		printf("%s: timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	HWRITE4(sc, PCIE_LINK_CTRL_STAT,
	    PCIE_LINK_CTRL_STAT_LINK_L0S_ENTRY |
	    PCIE_LINK_CTRL_STAT_LINK_WIDTH_1);

	HSET4(sc, PCIE_CMD, PCI_COMMAND_IO_ENABLE |
	    PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE);

	/* Point the controller's MSI target at our doorbell. */
	HWRITE4(sc, PCIE_CORE_MSI_ADDR_LOW,
	    MVKPCIE_DMA_DVA(sc->sc_msi_addr) & 0xffffffff);
	HWRITE4(sc, PCIE_CORE_MSI_ADDR_HIGH,
	    MVKPCIE_DMA_DVA(sc->sc_msi_addr) >> 32);

	/* Set up address translation for I/O space. */
	sc->sc_io_bus_addr = sc->sc_mem_bus_addr = -1;
	for (i = 0; i < sc->sc_nranges; i++) {
		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
		    sc->sc_ranges[i].size > 0) {
			sc->sc_io_base = sc->sc_ranges[i].phys_base;
			sc->sc_io_bus_addr = sc->sc_ranges[i].pci_base;
			sc->sc_io_size = sc->sc_ranges[i].size;
		}
		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x02000000 &&
		    sc->sc_ranges[i].size > 0) {
			sc->sc_mem_base = sc->sc_ranges[i].phys_base;
			sc->sc_mem_bus_addr = sc->sc_ranges[i].pci_base;
			sc->sc_mem_size = sc->sc_ranges[i].size;
		}
	}

	/* Set up bus range. */
	if (OF_getpropintarray(sc->sc_node, "bus-range", bus_range,
	    sizeof(bus_range)) != sizeof(bus_range) ||
	    bus_range[0] >= 256 || bus_range[1] >= 256) {
		bus_range[0] = 0;
		bus_range[1] = 255;
	}
	sc->sc_bus = bus_range[0];

	/* Initialize command/status. */
	csr = PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_io_size > 0)
		csr |= PCI_COMMAND_IO_ENABLE;
	if (sc->sc_mem_size > 0)
		csr |= PCI_COMMAND_MEM_ENABLE;
	sc->sc_bridge_command = csr;

	/* Initialize bus range. */
	bir = bus_range[0];
	bir |= ((bus_range[0] + 1) << 8);
	bir |= (bus_range[1] << 16);
	sc->sc_bridge_businfo = bir;

	/* Initialize I/O window. */
	iobase = sc->sc_io_bus_addr;
	iolimit = iobase + sc->sc_io_size - 1;
	blr = (iolimit & PPB_IO_MASK) | (PPB_IO_32BIT << PPB_IOLIMIT_SHIFT);
	blr |= ((iobase & PPB_IO_MASK) >> PPB_IO_SHIFT) | PPB_IO_32BIT;
	sc->sc_bridge_iostatus = blr;
	blr = (iobase & 0xffff0000) >> 16;
	blr |= iolimit & 0xffff0000;
	sc->sc_bridge_io_hi = blr;

	/* Initialize memory mapped I/O window. */
	membase = sc->sc_mem_bus_addr;
	memlimit = membase + sc->sc_mem_size - 1;
	blr = memlimit & PPB_MEM_MASK;
	blr |= (membase >> PPB_MEM_SHIFT);
	sc->sc_bridge_mem = blr;

	/* Clone our bus space tags, redirecting maps through our windows. */
	memcpy(&sc->sc_bus_iot, sc->sc_iot, sizeof(sc->sc_bus_iot));
	sc->sc_bus_iot.bus_private = sc;
	sc->sc_bus_iot._space_map = mvkpcie_bs_iomap;
	memcpy(&sc->sc_bus_memt, sc->sc_iot, sizeof(sc->sc_bus_memt));
	sc->sc_bus_memt.bus_private = sc;
	sc->sc_bus_memt._space_map = mvkpcie_bs_memmap;

	/* Chipset hooks for configuration space access. */
	sc->sc_pc.pc_conf_v = sc;
	sc->sc_pc.pc_attach_hook = mvkpcie_attach_hook;
	sc->sc_pc.pc_bus_maxdevs = mvkpcie_bus_maxdevs;
	sc->sc_pc.pc_make_tag = mvkpcie_make_tag;
	sc->sc_pc.pc_decompose_tag = mvkpcie_decompose_tag;
	sc->sc_pc.pc_conf_size = mvkpcie_conf_size;
	sc->sc_pc.pc_conf_read = mvkpcie_conf_read;
	sc->sc_pc.pc_conf_write = mvkpcie_conf_write;

	/* Chipset hooks for interrupt handling. */
	sc->sc_pc.pc_intr_v = sc;
	sc->sc_pc.pc_intr_map = mvkpcie_intr_map;
	sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
	sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
	sc->sc_pc.pc_intr_string = mvkpcie_intr_string;
	sc->sc_pc.pc_intr_establish = mvkpcie_intr_establish;
	sc->sc_pc.pc_intr_disestablish = mvkpcie_intr_disestablish;

	memset(&pba, 0, sizeof(pba));
	pba.pba_busname = "pci";
	pba.pba_iot = &sc->sc_bus_iot;
	pba.pba_memt = &sc->sc_bus_memt;
	pba.pba_dmat = faa->fa_dmat;
	pba.pba_pc = &sc->sc_pc;
	pba.pba_domain = pci_ndomains++;
	pba.pba_bus = sc->sc_bus;
	pba.pba_flags |= PCI_FLAGS_MSI_ENABLED;

	/* Register ourselves as the MSI interrupt controller. */
	sc->sc_msi_ic.ic_node = faa->fa_node;
	sc->sc_msi_ic.ic_cookie = self;
	sc->sc_msi_ic.ic_establish_msi = mvkpcie_intc_intr_establish_msi;
	sc->sc_msi_ic.ic_disestablish = mvkpcie_intc_intr_disestablish_msi;
	sc->sc_msi_ic.ic_barrier = mvkpcie_intc_intr_barrier;
	arm_intr_register_fdt(&sc->sc_msi_ic);

	config_found(self, &pba, NULL);
}
541 
542 int
543 mvkpcie_link_up(struct mvkpcie_softc *sc)
544 {
545 	uint32_t reg;
546 
547 	reg = HREAD4(sc, LMI_CFG);
548 	return LMI_CFG_LTSSM_VAL(reg) >= LMI_CFG_LTSSM_L0;
549 }
550 
/* Chipset attach hook; nothing to do for this controller. */
void
mvkpcie_attach_hook(struct device *parent, struct device *self,
    struct pcibus_attach_args *pba)
{
}
556 
557 int
558 mvkpcie_bus_maxdevs(void *v, int bus)
559 {
560 	struct mvkpcie_softc *sc = v;
561 
562 	if (bus == sc->sc_bus || bus == sc->sc_bus + 1)
563 		return 1;
564 	return 32;
565 }
566 
567 pcitag_t
568 mvkpcie_make_tag(void *v, int bus, int device, int function)
569 {
570 	return ((bus << 20) | (device << 15) | (function << 12));
571 }
572 
573 void
574 mvkpcie_decompose_tag(void *v, pcitag_t tag, int *bp, int *dp, int *fp)
575 {
576 	if (bp != NULL)
577 		*bp = (tag >> 20) & 0xff;
578 	if (dp != NULL)
579 		*dp = (tag >> 15) & 0x1f;
580 	if (fp != NULL)
581 		*fp = (tag >> 12) & 0x7;
582 }
583 
584 int
585 mvkpcie_conf_size(void *v, pcitag_t tag)
586 {
587 	return PCIE_CONFIG_SPACE_SIZE;
588 }
589 
/*
 * Emulate config-space reads of the (invisible) host bridge.  The
 * window and bus registers return the shadow values computed at
 * attach time; IDs and revision come from the controller itself.
 * Unhandled registers read as zero.
 */
pcireg_t
mvkpcie_conf_read_bridge(struct mvkpcie_softc *sc, int reg)
{
	switch (reg) {
	case PCI_ID_REG:
		/* Marvell vendor ID plus the controller's device ID. */
		return PCI_VENDOR_MARVELL |
		    (HREAD4(sc, PCIE_DEV_ID) & 0xffff0000);
	case PCI_COMMAND_STATUS_REG:
		return sc->sc_bridge_command;
	case PCI_CLASS_REG:
		/* PCI-PCI bridge class with the hardware revision. */
		return PCI_CLASS_BRIDGE << PCI_CLASS_SHIFT |
		    PCI_SUBCLASS_BRIDGE_PCI << PCI_SUBCLASS_SHIFT |
		    (HREAD4(sc, PCIE_DEV_REV) & 0xff);
	case PCI_BHLC_REG:
		/* Type 1 header. */
		return 1 << PCI_HDRTYPE_SHIFT |
		    0x10 << PCI_CACHELINE_SHIFT;
	case PPB_REG_BUSINFO:
		return sc->sc_bridge_businfo;
	case PPB_REG_IOSTATUS:
		return sc->sc_bridge_iostatus;
	case PPB_REG_MEM:
		return sc->sc_bridge_mem;
	case PPB_REG_IO_HI:
		return sc->sc_bridge_io_hi;
	case PPB_REG_PREFMEM:
	case PPB_REG_PREFBASE_HI32:
	case PPB_REG_PREFLIM_HI32:
	case PPB_REG_BRIDGECONTROL:
		/* No prefetchable window; no bridge control bits. */
		return 0;
	default:
		break;
	}
	return 0;
}
624 
/* Config-space writes to the emulated host bridge are discarded. */
void
mvkpcie_conf_write_bridge(struct mvkpcie_softc *sc, int reg, pcireg_t data)
{
	/* Treat emulated bridge registers as read-only. */
}
630 
/*
 * Read a configuration register.  Accesses to the root bus go to the
 * emulated bridge; everything else is issued on the link through the
 * controller's PIO mechanism.  Returns all-ones on a PIO timeout.
 */
pcireg_t
mvkpcie_conf_read(void *v, pcitag_t tag, int off)
{
	struct mvkpcie_softc *sc = v;
	int bus, dev, fn;
	uint32_t reg;
	int i;

	mvkpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == sc->sc_bus) {
		KASSERT(dev == 0);
		return mvkpcie_conf_read_bridge(sc, off);
	}

	/*
	 * Program a PIO config read: type 0 for the directly attached
	 * bus, type 1 for buses behind a further bridge.
	 */
	HWRITE4(sc, PIO_START, PIO_START_STOP);
	HWRITE4(sc, PIO_ISR, PIO_ISR_CLEAR);
	reg = HREAD4(sc, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (bus == sc->sc_bus + 1)
		reg |= PIO_CTRL_TYPE_RD0;
	else
		reg |= PIO_CTRL_TYPE_RD1;
	HWRITE4(sc, PIO_CTRL, reg);
	HWRITE4(sc, PIO_ADDR_LS, tag | off);
	HWRITE4(sc, PIO_ADDR_MS, 0);
	HWRITE4(sc, PIO_WR_DATA_STRB, PIO_WR_DATA_STRB_VALUE);
	HWRITE4(sc, PIO_START, PIO_START_START);

	/* Poll (about 1ms max) for transaction completion. */
	for (i = 500; i > 0; i--) {
		if (HREAD4(sc, PIO_START) == 0 &&
		    HREAD4(sc, PIO_ISR) != 0)
			break;
		delay(2);
	}
	if (i == 0) {
		printf("%s: timeout\n", sc->sc_dev.dv_xname);
		return 0xffffffff;
	}

	return HREAD4(sc, PIO_RD_DATA);
}
672 
/*
 * Write a configuration register.  Mirrors mvkpcie_conf_read(): root
 * bus writes hit the emulated bridge (and are ignored), other buses
 * are reached through a PIO config write.
 */
void
mvkpcie_conf_write(void *v, pcitag_t tag, int off, pcireg_t data)
{
	struct mvkpcie_softc *sc = v;
	int bus, dev, fn;
	uint32_t reg;
	int i;

	mvkpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == sc->sc_bus) {
		KASSERT(dev == 0);
		mvkpcie_conf_write_bridge(sc, off, data);
		return;
	}

	/* Program a PIO config write (type 0 or type 1, as for reads). */
	HWRITE4(sc, PIO_START, PIO_START_STOP);
	HWRITE4(sc, PIO_ISR, PIO_ISR_CLEAR);
	reg = HREAD4(sc, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (bus == sc->sc_bus + 1)
		reg |= PIO_CTRL_TYPE_WR0;
	else
		reg |= PIO_CTRL_TYPE_WR1;
	HWRITE4(sc, PIO_CTRL, reg);
	HWRITE4(sc, PIO_ADDR_LS, tag | off);
	HWRITE4(sc, PIO_ADDR_MS, 0);
	HWRITE4(sc, PIO_WR_DATA, data);
	HWRITE4(sc, PIO_WR_DATA_STRB, PIO_WR_DATA_STRB_VALUE);
	HWRITE4(sc, PIO_START, PIO_START_START);

	/* Poll (about 1ms max) for transaction completion. */
	for (i = 500; i > 0; i--) {
		if (HREAD4(sc, PIO_START) == 0 &&
		    HREAD4(sc, PIO_ISR) != 0)
			break;
		delay(2);
	}
	if (i == 0) {
		printf("%s: timeout\n", sc->sc_dev.dv_xname);
		return;
	}
}
714 
/*
 * Legacy INTx mapping always fails here; INTx is routed through the
 * FDT interrupt map in mvkpcie_intr_establish() instead.
 */
int
mvkpcie_intr_map(struct pci_attach_args *pa, pci_intr_handle_t *ihp)
{
	return -1;
}
720 
721 const char *
722 mvkpcie_intr_string(void *v, pci_intr_handle_t ih)
723 {
724 	switch (ih.ih_type) {
725 	case PCI_MSI:
726 		return "msi";
727 	case PCI_MSIX:
728 		return "msix";
729 	}
730 
731 	return "intx";
732 }
733 
/*
 * Establish a PCI interrupt.  MSI/MSI-X handles are hooked up through
 * the FDT MSI controller and the resulting address/data pair is
 * programmed into the device; INTx is resolved through the FDT
 * interrupt map using the device's bus/dev/fn and pin.
 */
void *
mvkpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct mvkpcie_softc *sc = v;
	void *cookie;

	KASSERT(ih.ih_type != PCI_NONE);

	if (ih.ih_type != PCI_INTX) {
		uint64_t addr, data;

		/* Assume hardware passes Requester ID as sideband data. */
		data = pci_requester_id(ih.ih_pc, ih.ih_tag);
		cookie = fdt_intr_establish_msi_cpu(sc->sc_node, &addr,
		    &data, level, ci, func, arg, (void *)name);
		if (cookie == NULL)
			return NULL;

		/* TODO: translate address to the PCI device's view */

		if (ih.ih_type == PCI_MSIX) {
			pci_msix_enable(ih.ih_pc, ih.ih_tag,
			    &sc->sc_bus_memt, ih.ih_intrpin, addr, data);
		} else
			pci_msi_enable(ih.ih_pc, ih.ih_tag, addr, data);
	} else {
		int bus, dev, fn;
		uint32_t reg[4];

		mvkpcie_decompose_tag(sc, ih.ih_tag, &bus, &dev, &fn);

		/* Build the interrupt-map lookup key: address + pin. */
		reg[0] = bus << 16 | dev << 11 | fn << 8;
		reg[1] = reg[2] = 0;
		reg[3] = ih.ih_intrpin;

		cookie = fdt_intr_establish_imap_cpu(sc->sc_node, reg,
		    sizeof(reg), level, ci, func, arg, name);
	}

	return cookie;
}
776 
/* Tearing down PCI interrupts is not implemented; calling this panics. */
void
mvkpcie_intr_disestablish(void *v, void *cookie)
{
	panic("%s", __func__);
}
782 
/*
 * Bus space map hook for child I/O space: translate the PCI bus
 * address through the matching I/O range (flags type 0x01) and map
 * the resulting CPU address.  Returns ENXIO if no window covers it.
 */
int
mvkpcie_bs_iomap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	struct mvkpcie_softc *sc = t->bus_private;
	int i;

	for (i = 0; i < sc->sc_nranges; i++) {
		uint64_t pci_start = sc->sc_ranges[i].pci_base;
		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
		uint64_t phys_start = sc->sc_ranges[i].phys_base;

		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
		    addr >= pci_start && addr + size <= pci_end) {
			return bus_space_map(sc->sc_iot,
			    addr - pci_start + phys_start, size, flags, bshp);
		}
	}

	return ENXIO;
}
804 
/*
 * Bus space map hook for child memory space: same as
 * mvkpcie_bs_iomap() but for memory ranges (flags type 0x02).
 */
int
mvkpcie_bs_memmap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	struct mvkpcie_softc *sc = t->bus_private;
	int i;

	for (i = 0; i < sc->sc_nranges; i++) {
		uint64_t pci_start = sc->sc_ranges[i].pci_base;
		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
		uint64_t phys_start = sc->sc_ranges[i].phys_base;

		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x02000000 &&
		    addr >= pci_start && addr + size <= pci_end) {
			return bus_space_map(sc->sc_iot,
			    addr - pci_start + phys_start, size, flags, bshp);
		}
	}

	return ENXIO;
}
826 
827 int
828 mvkpcie_intc_intr(void *cookie)
829 {
830 	struct mvkpcie_softc *sc = (struct mvkpcie_softc *)cookie;
831 	struct intrhand *ih;
832 	uint32_t pending;
833 	int i, s;
834 
835 	if (!(HREAD4(sc, HOST_CTRL_INT_STATUS) & HOST_CTRL_INT_MASK_CORE_INT))
836 		return 0;
837 
838 	if (!(HREAD4(sc, PCIE_CORE_ISR0_STATUS) & PCIE_CORE_ISR0_MASK_MSI_INT))
839 		return 0;
840 
841 	pending = HREAD4(sc, PCIE_CORE_MSI_STATUS);
842 	while (pending) {
843 		i = ffs(pending) - 1;
844 		HWRITE4(sc, PCIE_CORE_MSI_STATUS, (1 << i));
845 		pending &= ~(1 << i);
846 
847 		i = HREAD4(sc, PCIE_CORE_MSI_PAYLOAD) & 0xff;
848 		if ((ih = sc->sc_msi_handlers[i]) != NULL) {
849 			s = splraise(ih->ih_ipl);
850 			if (ih->ih_func(ih->ih_arg))
851 				ih->ih_count.ec_count++;
852 			splx(s);
853 		}
854 	}
855 
856 	HWRITE4(sc, PCIE_CORE_ISR0_STATUS, PCIE_CORE_ISR0_MASK_MSI_INT);
857 	HWRITE4(sc, HOST_CTRL_INT_STATUS, HOST_CTRL_INT_MASK_CORE_INT);
858 	return 1;
859 }
860 
/*
 * Establish an MSI handler: pick the first free slot, record the
 * handler and return the doorbell address and the slot number as the
 * MSI address/data pair.  MSIs are only delivered on the primary CPU.
 */
void *
mvkpcie_intc_intr_establish_msi(void *cookie, uint64_t *addr, uint64_t *data,
    int level, struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct mvkpcie_softc *sc = (struct mvkpcie_softc *)cookie;
	struct intrhand *ih;
	int i, s;

	/* We can only deliver to the primary CPU. */
	if (ci != NULL && !CPU_IS_PRIMARY(ci))
		return NULL;

	/* Find a free handler slot. */
	for (i = 0; i < nitems(sc->sc_msi_handlers); i++) {
		if (sc->sc_msi_handlers[i] == NULL)
			break;
	}

	if (i == nitems(sc->sc_msi_handlers))
		return NULL;

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_irq = i;
	ih->ih_name = name;
	ih->ih_sc = sc;

	s = splhigh();

	sc->sc_msi_handlers[i] = ih;

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

	/* Re-hook the parent interrupt at the new maximum IPL. */
	mvkpcie_intc_recalc_ipl(sc);

	/* Devices write the slot number to the doorbell address. */
	*addr = MVKPCIE_DMA_DVA(sc->sc_msi_addr);
	*data = i;

	splx(s);
	return (ih);
}
903 
/*
 * Tear down an MSI handler established above and recompute the IPL
 * the parent interrupt is hooked at.
 */
void
mvkpcie_intc_intr_disestablish_msi(void *cookie)
{
	struct intrhand *ih = cookie;
	struct mvkpcie_softc *sc = ih->ih_sc;
	int s;

	s = splhigh();

	sc->sc_msi_handlers[ih->ih_irq] = NULL;
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);
	free(ih, M_DEVBUF, sizeof(*ih));

	mvkpcie_intc_recalc_ipl(sc);

	splx(s);
}
922 
/* Wait for the parent interrupt handler to finish on all CPUs. */
void
mvkpcie_intc_intr_barrier(void *cookie)
{
	struct intrhand *ih = cookie;
	struct mvkpcie_softc *sc = ih->ih_sc;

	intr_barrier(sc->sc_ih);
}
931 
932 void
933 mvkpcie_intc_recalc_ipl(struct mvkpcie_softc *sc)
934 {
935 	struct intrhand *ih;
936 	int max = IPL_NONE;
937 	int min = IPL_HIGH;
938 	int irq;
939 
940 	for (irq = 0; irq < nitems(sc->sc_msi_handlers); irq++) {
941 		ih = sc->sc_msi_handlers[irq];
942 		if (ih == NULL)
943 			continue;
944 
945 		if (ih->ih_ipl > max)
946 			max = ih->ih_ipl;
947 
948 		if (ih->ih_ipl < min)
949 			min = ih->ih_ipl;
950 	}
951 
952 	for (irq = 0; irq < nitems(sc->sc_msi_handlers); irq++) {
953 		ih = sc->sc_msi_handlers[irq];
954 		if (ih == NULL)
955 			continue;
956 
957 		if (ih->ih_ipl > max)
958 			max = ih->ih_ipl;
959 
960 		if (ih->ih_ipl < min)
961 			min = ih->ih_ipl;
962 	}
963 
964 	if (max == IPL_NONE)
965 		min = IPL_NONE;
966 
967 	if (sc->sc_ipl != max) {
968 		sc->sc_ipl = max;
969 
970 		if (sc->sc_ih != NULL)
971 			fdt_intr_disestablish(sc->sc_ih);
972 
973 		if (sc->sc_ipl != IPL_NONE)
974 			sc->sc_ih = fdt_intr_establish(sc->sc_node, sc->sc_ipl,
975 			    mvkpcie_intc_intr, sc, sc->sc_dev.dv_xname);
976 	}
977 }
978 
/*
 * Allocate a small, zeroed, contiguous DMA buffer (used only for the
 * MSI doorbell).  On failure every resource acquired so far is
 * released in reverse order via the goto-cleanup chain.
 */
struct mvkpcie_dmamem *
mvkpcie_dmamem_alloc(struct mvkpcie_softc *sc, bus_size_t size, bus_size_t align)
{
	struct mvkpcie_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(mdm->mdm_kva, size);

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, sizeof(*mdm));

	return (NULL);
}
1020 
/*
 * Release a buffer from mvkpcie_dmamem_alloc(); teardown is the exact
 * reverse of the allocation order.
 */
void
mvkpcie_dmamem_free(struct mvkpcie_softc *sc, struct mvkpcie_dmamem *mdm)
{
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, sizeof(*mdm));
}
1029