xref: /openbsd-src/sys/dev/fdt/if_dwxe.c (revision 25c4e8bd056e974b28f4a0ffd39d76c190a56013)
1 /*	$OpenBSD: if_dwxe.c,v 1.21 2022/07/09 20:51:39 kettenis Exp $	*/
2 /*
3  * Copyright (c) 2008 Mark Kettenis
4  * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for the ethernet controller on the Allwinner H3/A64 SoCs.
21  */
22 
23 #include "bpfilter.h"
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/device.h>
28 #include <sys/kernel.h>
29 #include <sys/malloc.h>
30 #include <sys/mbuf.h>
31 #include <sys/queue.h>
32 #include <sys/socket.h>
33 #include <sys/sockio.h>
34 #include <sys/timeout.h>
35 
36 #include <machine/bus.h>
37 #include <machine/fdt.h>
38 
39 #include <net/if.h>
40 #include <net/if_media.h>
41 
42 #include <dev/ofw/openfirm.h>
43 #include <dev/ofw/ofw_clock.h>
44 #include <dev/ofw/ofw_misc.h>
45 #include <dev/ofw/ofw_pinctrl.h>
46 #include <dev/ofw/ofw_regulator.h>
47 #include <dev/ofw/fdt.h>
48 
49 #include <dev/mii/mii.h>
50 #include <dev/mii/miivar.h>
51 
52 #if NBPFILTER > 0
53 #include <net/bpf.h>
54 #endif
55 
56 #include <netinet/in.h>
57 #include <netinet/if_ether.h>
58 
59 /*
60  * DWXE registers.
61  */
62 
63 #define DWXE_BASIC_CTL0		0x00
64 #define  DWXE_BASIC_CTL0_DUPLEX		(1 << 0)
65 #define  DWXE_BASIC_CTL0_LOOPBACK		(1 << 1)
66 #define  DWXE_BASIC_CTL0_SPEED_1000		(0 << 2)
67 #define  DWXE_BASIC_CTL0_SPEED_10		(2 << 2)
68 #define  DWXE_BASIC_CTL0_SPEED_100		(3 << 2)
69 #define  DWXE_BASIC_CTL0_SPEED_MASK		(3 << 2)
70 #define DWXE_BASIC_CTL1		0x04
71 #define  DWXE_BASIC_CTL1_SOFT_RST		(1 << 0)
72 #define  DWXE_BASIC_CTL1_RX_TX_PRI		(1 << 1)
73 #define  DWXE_BASIC_CTL1_BURST_LEN_MASK	(0x3f << 24)
74 #define  DWXE_BASIC_CTL1_BURST_LEN(x)		((x) << 24)
75 #define DWXE_INT_STA			0x08
76 #define  DWXE_INT_STA_TX_INT			(1 << 0)
77 #define  DWXE_INT_STA_TX_DMA_STOP_INT		(1 << 1)
78 #define  DWXE_INT_STA_TX_BUF_UA_INT		(1 << 2)
79 #define  DWXE_INT_STA_TX_TIMEOUT_INT		(1 << 3)
80 #define  DWXE_INT_STA_TX_UNDERFLOW_INT	(1 << 4)
81 #define  DWXE_INT_STA_TX_EARLY_INT		(1 << 5)
82 #define  DWXE_INT_STA_RX_INT			(1 << 8)
83 #define  DWXE_INT_STA_RX_BUF_UA_INT		(1 << 9)
84 #define  DWXE_INT_STA_RX_DMA_STOP_INT		(1 << 10)
85 #define  DWXE_INT_STA_RX_TIMEOUT_INT		(1 << 11)
86 #define  DWXE_INT_STA_RX_OVERFLOW_INT		(1 << 12)
87 #define  DWXE_INT_STA_RX_EARLY_INT		(1 << 13)
88 #define  DWXE_INT_STA_RGMII_STA_INT		(1 << 16)
89 #define DWXE_INT_EN			0x0C
90 #define  DWXE_INT_EN_TX_INT			(1 << 0)
91 #define  DWXE_INT_EN_TX_DMA_STOP_INT		(1 << 1)
92 #define  DWXE_INT_EN_TX_BUF_UA_INT		(1 << 2)
93 #define  DWXE_INT_EN_TX_TIMEOUT_INT		(1 << 3)
94 #define  DWXE_INT_EN_TX_UNDERFLOW_INT		(1 << 4)
95 #define  DWXE_INT_EN_TX_EARLY_INT		(1 << 5)
96 #define  DWXE_INT_EN_RX_INT			(1 << 8)
97 #define  DWXE_INT_EN_RX_BUF_UA_INT		(1 << 9)
98 #define  DWXE_INT_EN_RX_DMA_STOP_INT		(1 << 10)
99 #define  DWXE_INT_EN_RX_TIMEOUT_INT		(1 << 11)
100 #define  DWXE_INT_EN_RX_OVERFLOW_INT		(1 << 12)
101 #define  DWXE_INT_EN_RX_EARLY_INT		(1 << 13)
102 #define  DWXE_INT_EN_RGMII_EN_INT		(1 << 16)
103 #define DWXE_TX_CTL0			0x10
104 #define  DWXE_TX_CTL0_TX_TRANSMITTER_EN	(1U << 31)
105 #define DWXE_TX_CTL1			0x14
106 #define  DWXE_TX_CTL1_TX_FIFO_FLUSH		(1 << 0)
107 #define  DWXE_TX_CTL1_TX_MD			(1 << 1)
108 #define  DWXE_TX_CTL1_TX_NEXT_FRM		(1 << 2)
109 #define  DWXE_TX_CTL1_TX_TH_MASK		(0x3 << 8)
110 #define  DWXE_TX_CTL1_TX_TH_64		0
111 #define  DWXE_TX_CTL1_TX_TH_128		(0x1 << 8)
112 #define  DWXE_TX_CTL1_TX_TH_192		(0x2 << 8)
113 #define  DWXE_TX_CTL1_TX_TH_256		(0x3 << 8)
114 #define  DWXE_TX_CTL1_TX_DMA_EN		(1 << 30)
115 #define  DWXE_TX_CTL1_TX_DMA_START		(1U << 31)
116 #define DWXE_TX_FLOW_CTL		0x1C
117 #define  DWXE_TX_FLOW_CTL_EN			(1 << 0)
118 #define DWXE_TX_DESC_LIST		0x20
119 #define DWXE_RX_CTL0			0x24
120 #define  DWXE_RX_CTL0_RX_FLOW_CTL_EN		(1 << 16)
121 #define  DWXE_RX_CTL0_RX_DO_CRC		(1 << 27)
122 #define  DWXE_RX_CTL0_RX_RECEIVER_EN		(1U << 31)
123 #define DWXE_RX_CTL1			0x28
124 #define  DWXE_RX_CTL1_RX_MD			(1 << 1)
125 #define  DWXE_RX_CTL1_RX_TH_MASK		(0x3 << 4)
126 #define  DWXE_RX_CTL1_RX_TH_32		(0x0 << 4)
127 #define  DWXE_RX_CTL1_RX_TH_64		(0x1 << 4)
128 #define  DWXE_RX_CTL1_RX_TH_96		(0x2 << 4)
129 #define  DWXE_RX_CTL1_RX_TH_128		(0x3 << 4)
130 #define  DWXE_RX_CTL1_RX_DMA_EN		(1 << 30)
131 #define  DWXE_RX_CTL1_RX_DMA_START		(1U << 31)
132 #define DWXE_RX_DESC_LIST		0x34
133 #define DWXE_RX_FRM_FLT		0x38
134 #define DWXE_RX_FRM_FLT_RX_ALL		(1 << 0)
135 #define DWXE_RX_FRM_FLT_HASH_UNICAST		(1 << 8)
136 #define DWXE_RX_FRM_FLT_HASH_MULTICAST	(1 << 9)
137 #define DWXE_RX_FRM_FLT_CTL			(1 << 13)
138 #define DWXE_RX_FRM_FLT_RX_ALL_MULTICAST	(1 << 16)
139 #define DWXE_RX_FRM_FLT_DIS_BROADCAST		(1 << 17)
140 #define DWXE_RX_FRM_FLT_DIS_ADDR_FILTER	(1U << 31)
141 #define DWXE_RX_HASH0			0x40
142 #define DWXE_RX_HASH1			0x44
143 #define DWXE_MDIO_CMD			0x48
144 #define  DWXE_MDIO_CMD_MII_BUSY		(1 << 0)
145 #define  DWXE_MDIO_CMD_MII_WRITE		(1 << 1)
146 #define  DWXE_MDIO_CMD_PHY_REG_SHIFT		4
147 #define  DWXE_MDIO_CMD_PHY_ADDR_SHIFT		12
148 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_SHIFT	20
149 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_MASK	0x7
150 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_16	0
151 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_32	1
152 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_64	2
153 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_128	3
154 #define DWXE_MDIO_DATA		0x4C
155 #define DWXE_MACADDR_HI		0x50
156 #define DWXE_MACADDR_LO		0x54
157 #define DWXE_TX_DMA_STA		0xB0
158 #define DWXE_TX_CUR_DESC		0xB4
159 #define DWXE_TX_CUR_BUF		0xB8
160 #define DWXE_RX_DMA_STA		0xC0
161 #define DWXE_RX_CUR_DESC		0xC4
162 #define DWXE_RX_CUR_BUF		0xC8
163 
164 /*
165  * DWXE descriptors.
166  */
167 
/*
 * Hardware DMA descriptor, shared with the MAC.  Tx and Rx rings use
 * the same layout; sd_next chains descriptors into a ring using device
 * (DMA) addresses, not kernel pointers.
 */
struct dwxe_desc {
	uint32_t sd_status;	/* Tx/Rx status bits; bit 31 = hardware owns it */
	uint32_t sd_len;	/* buffer size plus per-descriptor control bits */
	uint32_t sd_addr;	/* DMA address of the data buffer */
	uint32_t sd_next;	/* DMA address of the next descriptor */
};
174 
175 /* Tx status bits. */
176 #define DWXE_TX_DEFER			(1 << 0)
177 #define DWXE_TX_UNDERFLOW_ERR		(1 << 1)
178 #define DWXE_TX_DEFER_ERR		(1 << 2)
179 #define DWXE_TX_COL_CNT_MASK		(0xf << 3)
180 #define DWXE_TX_COL_CNT_SHIFT		3
181 #define DWXE_TX_COL_ERR_1		(1 << 8)
182 #define DWXE_TX_COL_ERR_0		(1 << 9)
183 #define DWXE_TX_CRS_ERR		(1 << 10)
184 #define DWXE_TX_PAYLOAD_ERR		(1 << 12)
185 #define DWXE_TX_LENGTH_ERR		(1 << 14)
186 #define DWXE_TX_HEADER_ERR		(1 << 16)
187 #define DWXE_TX_DESC_CTL		(1 << 31)
188 
189 /* Rx status bits */
190 #define DWXE_RX_PAYLOAD_ERR		(1 << 0)
191 #define DWXE_RX_CRC_ERR		(1 << 1)
192 #define DWXE_RX_PHY_ERR		(1 << 3)
193 #define DWXE_RX_LENGTH_ERR		(1 << 4)
194 #define DWXE_RX_FRM_TYPE		(1 << 5)
195 #define DWXE_RX_COL_ERR		(1 << 6)
196 #define DWXE_RX_HEADER_ERR		(1 << 7)
197 #define DWXE_RX_LAST_DESC		(1 << 8)
198 #define DWXE_RX_FIR_DESC		(1 << 9)
199 #define DWXE_RX_OVERFLOW_ERR		(1 << 11)
200 #define DWXE_RX_SAF_FAIL		(1 << 13)
201 #define DWXE_RX_NO_ENOUGH_BUF_ERR	(1 << 14)
202 #define DWXE_RX_FRM_LEN_MASK		0x3fff
203 #define DWXE_RX_FRM_LEN_SHIFT		16
204 #define DWXE_RX_DAF_FAIL		(1 << 30)
205 #define DWXE_RX_DESC_CTL		(1 << 31)
206 
207 /* Tx size bits */
208 #define DWXE_TX_BUF_SIZE		(0xfff << 0)
209 #define DWXE_TX_CRC_CTL		(1 << 26)
210 #define DWXE_TX_CHECKSUM_CTL_MASK	(0x3 << 27)
211 #define DWXE_TX_CHECKSUM_CTL_IP	(1 << 27)
212 #define DWXE_TX_CHECKSUM_CTL_NO_PSE	(2 << 27)
213 #define DWXE_TX_CHECKSUM_CTL_FULL	(3 << 27)
214 #define DWXE_TX_FIR_DESC		(1 << 29)
215 #define DWXE_TX_LAST_DESC		(1 << 30)
216 #define DWXE_TX_INT_CTL		(1 << 31)
217 
218 /* Rx size bits */
219 #define DWXE_RX_BUF_SIZE		(0xfff << 0)
220 #define DWXE_RX_INT_CTL		(1 << 31)
221 
222 /* EMAC syscon bits */
223 #define SYSCON_EMAC			0x30
224 #define SYSCON_ETCS_MASK		(0x3 << 0)
225 #define SYSCON_ETCS_MII			(0 << 0)
226 #define SYSCON_ETCS_EXT_GMII		(1 << 0)
227 #define SYSCON_ETCS_INT_GMII		(2 << 0)
228 #define SYSCON_EPIT			(1 << 2) /* 1: RGMII, 0: MII */
229 #define SYSCON_ERXDC_MASK		(0xf << 5)
230 #define SYSCON_ERXDC_SHIFT		5
231 #define SYSCON_ETXDC_MASK		(0x7 << 10)
232 #define SYSCON_ETXDC_SHIFT		10
233 #define SYSCON_RMII_EN			(1 << 13) /* 1: enable RMII (overrides EPIT) */
234 #define SYSCON_H3_EPHY_SELECT		(1 << 15) /* 1: internal PHY, 0: external PHY */
235 #define SYSCON_H3_EPHY_SHUTDOWN		(1 << 16) /* 1: shutdown, 0: power up */
236 #define SYSCON_H3_EPHY_LED_POL		(1 << 17) /* 1: active low, 0: active high */
237 #define SYSCON_H3_EPHY_CLK_SEL		(1 << 18) /* 1: 24MHz, 0: 25MHz */
238 #define SYSCON_H3_EPHY_ADDR_MASK	(0x1f << 20)
239 #define SYSCON_H3_EPHY_ADDR_SHIFT	20
240 
241 /* GMAC syscon bits (Allwinner R40) */
242 #define SYSCON_GMAC			0x00
243 #define SYSCON_GTCS_MASK		SYSCON_ETCS_MASK
244 #define SYSCON_GTCS_MII			SYSCON_ETCS_MII
245 #define SYSCON_GTCS_EXT_GMII		SYSCON_ETCS_EXT_GMII
246 #define SYSCON_GTCS_INT_GMII		SYSCON_ETCS_INT_GMII
247 #define SYSCON_GPIT			SYSCON_EPIT
248 #define SYSCON_GRXDC_MASK		(0x7 << 5)
249 #define SYSCON_GRXDC_SHIFT		5
250 
/*
 * Per-slot software state paired with each hardware descriptor: the
 * bus_dma map and the mbuf (if any) currently loaded into that slot.
 */
struct dwxe_buf {
	bus_dmamap_t	tb_map;
	struct mbuf	*tb_m;
};
255 
256 #define DWXE_NTXDESC	256
257 #define DWXE_NTXSEGS	16
258 
259 #define DWXE_NRXDESC	256
260 
/*
 * A single-segment bus_dma allocation together with its kernel
 * mapping; used for the Tx and Rx descriptor rings.  The accessor
 * macros below give the map, byte length, device (DMA) address and
 * kernel virtual address respectively.
 */
struct dwxe_dmamem {
	bus_dmamap_t		tdm_map;	/* DMA map */
	bus_dma_segment_t	tdm_seg;	/* backing memory segment */
	size_t			tdm_size;	/* allocation size in bytes */
	caddr_t			tdm_kva;	/* kernel virtual address */
};
#define DWXE_DMA_MAP(_tdm)	((_tdm)->tdm_map)
#define DWXE_DMA_LEN(_tdm)	((_tdm)->tdm_size)
#define DWXE_DMA_DVA(_tdm)	((_tdm)->tdm_map->dm_segs[0].ds_addr)
#define DWXE_DMA_KVA(_tdm)	((void *)(_tdm)->tdm_kva)
271 
/*
 * Per-device driver state.
 */
struct dwxe_softc {
	struct device		sc_dev;		/* base device glue */
	int			sc_node;	/* device tree node */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;
	void			*sc_ih;		/* interrupt handle */

	struct arpcom		sc_ac;		/* ethernet common data */
#define sc_lladdr	sc_ac.ac_enaddr
	struct mii_data		sc_mii;
#define sc_media	sc_mii.mii_media
	int			sc_link;	/* nonzero while link is up */
	int			sc_phyloc;	/* PHY address on the MDIO bus */

	/* Tx descriptor ring plus per-slot software state. */
	struct dwxe_dmamem	*sc_txring;
	struct dwxe_buf		*sc_txbuf;
	struct dwxe_desc	*sc_txdesc;
	int			sc_tx_prod;	/* next slot to fill */
	int			sc_tx_cons;	/* next slot to reclaim */

	/* Rx descriptor ring plus per-slot software state. */
	struct dwxe_dmamem	*sc_rxring;
	struct dwxe_buf		*sc_rxbuf;
	struct dwxe_desc	*sc_rxdesc;
	int			sc_rx_prod;
	struct if_rxring	sc_rx_ring;
	int			sc_rx_cons;

	struct timeout		sc_tick;	/* periodic MII tick */
	struct timeout		sc_rxto;	/* Rx ring reset timeout */

	/* MDC divider code (DWXE_MDIO_CMD_MDC_DIV_RATIO_M_*), not Hz. */
	uint32_t		sc_clk;
};
305 
306 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
307 
308 int	dwxe_match(struct device *, void *, void *);
309 void	dwxe_attach(struct device *, struct device *, void *);
310 int	dwxe_activate(struct device *, int);
311 void	dwxe_init(struct dwxe_softc *sc);
312 void	dwxe_phy_setup_emac(struct dwxe_softc *);
313 void	dwxe_phy_setup_gmac(struct dwxe_softc *);
314 
315 const struct cfattach dwxe_ca = {
316 	sizeof(struct dwxe_softc), dwxe_match, dwxe_attach,
317 	NULL, dwxe_activate
318 };
319 
320 struct cfdriver dwxe_cd = {
321 	NULL, "dwxe", DV_IFNET
322 };
323 
324 uint32_t dwxe_read(struct dwxe_softc *, bus_addr_t);
325 void	dwxe_write(struct dwxe_softc *, bus_addr_t, uint32_t);
326 
327 int	dwxe_ioctl(struct ifnet *, u_long, caddr_t);
328 void	dwxe_start(struct ifqueue *);
329 void	dwxe_watchdog(struct ifnet *);
330 
331 int	dwxe_media_change(struct ifnet *);
332 void	dwxe_media_status(struct ifnet *, struct ifmediareq *);
333 
334 int	dwxe_mii_readreg(struct device *, int, int);
335 void	dwxe_mii_writereg(struct device *, int, int, int);
336 void	dwxe_mii_statchg(struct device *);
337 
338 void	dwxe_lladdr_read(struct dwxe_softc *, uint8_t *);
339 void	dwxe_lladdr_write(struct dwxe_softc *);
340 
341 void	dwxe_tick(void *);
342 void	dwxe_rxtick(void *);
343 
344 int	dwxe_intr(void *);
345 void	dwxe_tx_proc(struct dwxe_softc *);
346 void	dwxe_rx_proc(struct dwxe_softc *);
347 
348 void	dwxe_up(struct dwxe_softc *);
349 void	dwxe_down(struct dwxe_softc *);
350 void	dwxe_iff(struct dwxe_softc *);
351 int	dwxe_encap(struct dwxe_softc *, struct mbuf *, int *, int *);
352 
353 void	dwxe_reset(struct dwxe_softc *);
354 void	dwxe_stop_dma(struct dwxe_softc *);
355 
356 struct dwxe_dmamem *
357 	dwxe_dmamem_alloc(struct dwxe_softc *, bus_size_t, bus_size_t);
358 void	dwxe_dmamem_free(struct dwxe_softc *, struct dwxe_dmamem *);
359 struct mbuf *dwxe_alloc_mbuf(struct dwxe_softc *, bus_dmamap_t);
360 void	dwxe_fill_rx_ring(struct dwxe_softc *);
361 
362 int
363 dwxe_match(struct device *parent, void *cfdata, void *aux)
364 {
365 	struct fdt_attach_args *faa = aux;
366 
367 	return OF_is_compatible(faa->fa_node, "allwinner,sun8i-h3-emac") ||
368 	    OF_is_compatible(faa->fa_node, "allwinner,sun8i-r40-gmac") ||
369 	    OF_is_compatible(faa->fa_node, "allwinner,sun50i-a64-emac");
370 }
371 
/*
 * Attach: map the registers, look up the PHY and the MDIO clock
 * divider from the device tree, determine the MAC address, bring up
 * the hardware, then hook the driver into the network stack, the MII
 * layer and the interrupt controller.
 */
void
dwxe_attach(struct device *parent, struct device *self, void *aux)
{
	struct dwxe_softc *sc = (void *)self;
	struct fdt_attach_args *faa = aux;
	struct ifnet *ifp;
	uint32_t phy;
	int node;

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}
	sc->sc_dmat = faa->fa_dmat;

	/* Lookup PHY. */
	phy = OF_getpropint(faa->fa_node, "phy-handle", 0);
	node = OF_getnodebyphandle(phy);
	if (node)
		sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
	else
		sc->sc_phyloc = MII_PHY_ANY;

	/*
	 * Translate the "stmmaceth" bus clock frequency into the
	 * matching MDC divider code; from here on sc_clk holds the
	 * divider code, not the frequency.
	 */
	sc->sc_clk = clock_get_frequency(faa->fa_node, "stmmaceth");
	if (sc->sc_clk > 160000000)
		sc->sc_clk = DWXE_MDIO_CMD_MDC_DIV_RATIO_M_128;
	else if (sc->sc_clk > 80000000)
		sc->sc_clk = DWXE_MDIO_CMD_MDC_DIV_RATIO_M_64;
	else if (sc->sc_clk > 40000000)
		sc->sc_clk = DWXE_MDIO_CMD_MDC_DIV_RATIO_M_32;
	else
		sc->sc_clk = DWXE_MDIO_CMD_MDC_DIV_RATIO_M_16;

	/* Prefer a DT-provided MAC address; else read it from the chip. */
	if (OF_getprop(faa->fa_node, "local-mac-address",
	    &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
		dwxe_lladdr_read(sc, sc->sc_lladdr);
	printf(": address %s\n", ether_sprintf(sc->sc_lladdr));

	dwxe_init(sc);

	timeout_set(&sc->sc_tick, dwxe_tick, sc);
	timeout_set(&sc->sc_rxto, dwxe_rxtick, sc);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = dwxe_ioctl;
	ifp->if_qstart = dwxe_start;
	ifp->if_watchdog = dwxe_watchdog;
	ifq_set_maxlen(&ifp->if_snd, DWXE_NTXDESC - 1);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = dwxe_mii_readreg;
	sc->sc_mii.mii_writereg = dwxe_mii_writereg;
	sc->sc_mii.mii_statchg = dwxe_mii_statchg;

	ifmedia_init(&sc->sc_media, 0, dwxe_media_change, dwxe_media_status);

	/* Probe PHYs; fall back to manual media if none answers. */
	mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
	    MII_OFFSET_ANY, MIIF_NOISOLATE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_NET | IPL_MPSAFE,
	    dwxe_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL)
		printf("%s: can't establish interrupt\n", sc->sc_dev.dv_xname);
}
454 
455 int
456 dwxe_activate(struct device *self, int act)
457 {
458 	struct dwxe_softc *sc = (struct dwxe_softc *)self;
459 	struct ifnet *ifp = &sc->sc_ac.ac_if;
460 
461 	switch (act) {
462 	case DVACT_SUSPEND:
463 		if (ifp->if_flags & IFF_RUNNING)
464 			dwxe_down(sc);
465 		break;
466 	case DVACT_RESUME:
467 		dwxe_init(sc);
468 		if (ifp->if_flags & IFF_UP)
469 			dwxe_up(sc);
470 		break;
471 	}
472 
473 	return 0;
474 }
475 
/*
 * Hardware bring-up (attach and resume): pins, clocks, PHY power and
 * the SoC-specific syscon glue, followed by a MAC reset.  The order
 * of these steps matters.
 */
void
dwxe_init(struct dwxe_softc *sc)
{
	uint32_t phy_supply;

	pinctrl_byname(sc->sc_node, "default");

	/* Enable clock. */
	clock_enable(sc->sc_node, "stmmaceth");
	reset_deassert(sc->sc_node, "stmmaceth");
	/* Let the clock/reset settle before touching the block. */
	delay(5000);

	/* Power up PHY. */
	phy_supply = OF_getpropint(sc->sc_node, "phy-supply", 0);
	if (phy_supply)
		regulator_enable(phy_supply);

	/* Do hardware specific initializations. */
	if (OF_is_compatible(sc->sc_node, "allwinner,sun8i-r40-gmac"))
		dwxe_phy_setup_gmac(sc);
	else
		dwxe_phy_setup_emac(sc);

	dwxe_reset(sc);
}
501 
/*
 * Program the EMAC clock/glue register in the system controller
 * (H3/A64): PHY interface type, internal-PHY selection on the H3,
 * and RGMII tx/rx delay chains taken from the device tree.
 */
void
dwxe_phy_setup_emac(struct dwxe_softc *sc)
{
	struct regmap *rm;
	uint32_t syscon;
	uint32_t tx_delay, rx_delay;
	char *phy_mode;
	int len;

	rm = regmap_byphandle(OF_getpropint(sc->sc_node, "syscon", 0));
	if (rm == NULL)
		return;

	/* Start from the current value, clearing all fields we own. */
	syscon = regmap_read_4(rm, SYSCON_EMAC);
	syscon &= ~(SYSCON_ETCS_MASK|SYSCON_EPIT|SYSCON_RMII_EN);
	syscon &= ~(SYSCON_ETXDC_MASK | SYSCON_ERXDC_MASK);
	syscon &= ~SYSCON_H3_EPHY_SELECT;

	if ((len = OF_getproplen(sc->sc_node, "phy-mode")) <= 0)
		return;
	phy_mode = malloc(len, M_TEMP, M_WAITOK);
	OF_getprop(sc->sc_node, "phy-mode", phy_mode, len);
	/* Prefix match, so e.g. "rgmii-id" also selects RGMII. */
	if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
		syscon |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
	else if (!strncmp(phy_mode, "rmii", strlen("rmii")))
		syscon |= SYSCON_EPIT | SYSCON_ETCS_EXT_GMII;
	else if (!strncmp(phy_mode, "mii", strlen("mii")) &&
	    OF_is_compatible(sc->sc_node, "allwinner,sun8i-h3-emac")) {
		/* H3 internal PHY: power it up, select it, set its address. */
		syscon &= ~SYSCON_H3_EPHY_SHUTDOWN;
		syscon |= SYSCON_H3_EPHY_SELECT | SYSCON_H3_EPHY_CLK_SEL;
		if (OF_getproplen(sc->sc_node, "allwinner,leds-active-low") == 0)
			syscon |= SYSCON_H3_EPHY_LED_POL;
		else
			syscon &= ~SYSCON_H3_EPHY_LED_POL;
		syscon &= ~SYSCON_H3_EPHY_ADDR_MASK;
		syscon |= (sc->sc_phyloc << SYSCON_H3_EPHY_ADDR_SHIFT);
	}
	free(phy_mode, M_TEMP, len);

	/* DT delays are in ps; the hardware fields step in 100ps units. */
	tx_delay = OF_getpropint(sc->sc_node, "allwinner,tx-delay-ps", 0);
	rx_delay = OF_getpropint(sc->sc_node, "allwinner,rx-delay-ps", 0);
	syscon |= ((tx_delay / 100) << SYSCON_ETXDC_SHIFT) & SYSCON_ETXDC_MASK;
	syscon |= ((rx_delay / 100) << SYSCON_ERXDC_SHIFT) & SYSCON_ERXDC_MASK;

	regmap_write_4(rm, SYSCON_EMAC, syscon);
}
548 
/*
 * Program the GMAC clock/glue register in the system controller on
 * the Allwinner R40: PHY interface type and RGMII rx delay from the
 * device tree (no tx delay field on this SoC).
 */
void
dwxe_phy_setup_gmac(struct dwxe_softc *sc)
{
	struct regmap *rm;
	uint32_t syscon;
	uint32_t rx_delay;
	char *phy_mode;
	int len;

	rm = regmap_byphandle(OF_getpropint(sc->sc_node, "syscon", 0));
	if (rm == NULL)
		return;

	/* Start from the current value, clearing all fields we own. */
	syscon = regmap_read_4(rm, SYSCON_GMAC);
	syscon &= ~(SYSCON_GTCS_MASK|SYSCON_GPIT|SYSCON_ERXDC_MASK);

	if ((len = OF_getproplen(sc->sc_node, "phy-mode")) <= 0)
		return;
	phy_mode = malloc(len, M_TEMP, M_WAITOK);
	OF_getprop(sc->sc_node, "phy-mode", phy_mode, len);
	/* Prefix match, so e.g. "rgmii-id" also selects RGMII. */
	if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
		syscon |= SYSCON_GPIT | SYSCON_GTCS_INT_GMII;
	else if (!strncmp(phy_mode, "rmii", strlen("rmii")))
		syscon |= SYSCON_GPIT | SYSCON_GTCS_EXT_GMII;
	free(phy_mode, M_TEMP, len);

	/* DT delay is in ps; the hardware field steps in 100ps units. */
	rx_delay = OF_getpropint(sc->sc_node, "allwinner,rx-delay-ps", 0);
	syscon |= ((rx_delay / 100) << SYSCON_ERXDC_SHIFT) & SYSCON_ERXDC_MASK;

	regmap_write_4(rm, SYSCON_GMAC, syscon);
}
580 
581 uint32_t
582 dwxe_read(struct dwxe_softc *sc, bus_addr_t addr)
583 {
584 	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, addr);
585 }
586 
587 void
588 dwxe_write(struct dwxe_softc *sc, bus_addr_t addr, uint32_t data)
589 {
590 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, addr, data);
591 }
592 
/*
 * Read the station address out of the MACADDR registers.  The low
 * register holds bytes 0-3 (LSB first), the high register bytes 4-5.
 */
void
dwxe_lladdr_read(struct dwxe_softc *sc, uint8_t *lladdr)
{
	uint32_t machi, maclo;

	machi = dwxe_read(sc, DWXE_MACADDR_HI);
	maclo = dwxe_read(sc, DWXE_MACADDR_LO);

	lladdr[0] = (maclo >> 0) & 0xff;
	lladdr[1] = (maclo >> 8) & 0xff;
	lladdr[2] = (maclo >> 16) & 0xff;
	lladdr[3] = (maclo >> 24) & 0xff;
	lladdr[4] = (machi >> 0) & 0xff;
	lladdr[5] = (machi >> 8) & 0xff;
}
608 
609 void
610 dwxe_lladdr_write(struct dwxe_softc *sc)
611 {
612 	dwxe_write(sc, DWXE_MACADDR_HI,
613 	    sc->sc_lladdr[5] << 8 | sc->sc_lladdr[4] << 0);
614 	dwxe_write(sc, DWXE_MACADDR_LO,
615 	    sc->sc_lladdr[3] << 24 | sc->sc_lladdr[2] << 16 |
616 	    sc->sc_lladdr[1] << 8 | sc->sc_lladdr[0] << 0);
617 }
618 
/*
 * ifq start routine (MP-safe).  Dequeues packets, encapsulates them
 * into the Tx descriptor ring, then kicks the Tx DMA engine once for
 * the whole batch.
 */
void
dwxe_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct dwxe_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int error, idx, left, used;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	/* Free descriptors between producer and consumer (ring math). */
	idx = sc->sc_tx_prod;
	left = sc->sc_tx_cons;
	if (left <= idx)
		left += DWXE_NTXDESC;
	left -= idx;
	used = 0;

	for (;;) {
		/* Stop while a worst-case packet still fits. */
		if (used + DWXE_NTXSEGS + 1 > left) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		/* On success the mbuf is owned by the ring (freed in
		 * dwxe_tx_proc()); on EFBIG it is still ours to drop. */
		error = dwxe_encap(sc, m, &idx, &used);
		if (error == EFBIG) {
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;

		dwxe_write(sc, DWXE_TX_CTL1, dwxe_read(sc,
		     DWXE_TX_CTL1) | DWXE_TX_CTL1_TX_DMA_START);
	}
}
676 
/*
 * Interface ioctl handler: bring-up/teardown on flag changes, media
 * and rxring queries; everything else goes to ether_ioctl().  An
 * ENETRESET from any path reprograms the Rx filter via dwxe_iff().
 */
int
dwxe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct dwxe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				dwxe_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dwxe_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		/* Reprogram multicast/promiscuous filters. */
		if (ifp->if_flags & IFF_RUNNING)
			dwxe_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
726 
/*
 * Transmit watchdog: fires when dwxe_start() armed ifp->if_timer and
 * dwxe_tx_proc() never drained the ring.  Currently only logs; no
 * recovery/reset is attempted.
 */
void
dwxe_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}
732 
733 int
734 dwxe_media_change(struct ifnet *ifp)
735 {
736 	struct dwxe_softc *sc = ifp->if_softc;
737 
738 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
739 		mii_mediachg(&sc->sc_mii);
740 
741 	return (0);
742 }
743 
744 void
745 dwxe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
746 {
747 	struct dwxe_softc *sc = ifp->if_softc;
748 
749 	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
750 		mii_pollstat(&sc->sc_mii);
751 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
752 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
753 	}
754 }
755 
756 int
757 dwxe_mii_readreg(struct device *self, int phy, int reg)
758 {
759 	struct dwxe_softc *sc = (void *)self;
760 	int n;
761 
762 	dwxe_write(sc, DWXE_MDIO_CMD,
763 	    sc->sc_clk << DWXE_MDIO_CMD_MDC_DIV_RATIO_M_SHIFT |
764 	    phy << DWXE_MDIO_CMD_PHY_ADDR_SHIFT |
765 	    reg << DWXE_MDIO_CMD_PHY_REG_SHIFT |
766 	    DWXE_MDIO_CMD_MII_BUSY);
767 	for (n = 0; n < 1000; n++) {
768 		if ((dwxe_read(sc, DWXE_MDIO_CMD) &
769 		    DWXE_MDIO_CMD_MII_BUSY) == 0)
770 			return dwxe_read(sc, DWXE_MDIO_DATA);
771 		delay(10);
772 	}
773 
774 	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
775 	return (0);
776 }
777 
778 void
779 dwxe_mii_writereg(struct device *self, int phy, int reg, int val)
780 {
781 	struct dwxe_softc *sc = (void *)self;
782 	int n;
783 
784 	dwxe_write(sc, DWXE_MDIO_DATA, val);
785 	dwxe_write(sc, DWXE_MDIO_CMD,
786 	    sc->sc_clk << DWXE_MDIO_CMD_MDC_DIV_RATIO_M_SHIFT |
787 	    phy << DWXE_MDIO_CMD_PHY_ADDR_SHIFT |
788 	    reg << DWXE_MDIO_CMD_PHY_REG_SHIFT |
789 	    DWXE_MDIO_CMD_MII_WRITE |
790 	    DWXE_MDIO_CMD_MII_BUSY);
791 	for (n = 0; n < 1000; n++) {
792 		if ((dwxe_read(sc, DWXE_MDIO_CMD) &
793 		    DWXE_MDIO_CMD_MII_BUSY) == 0)
794 			return;
795 		delay(10);
796 	}
797 
798 	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
799 }
800 
801 void
802 dwxe_mii_statchg(struct device *self)
803 {
804 	struct dwxe_softc *sc = (void *)self;
805 	uint32_t basicctrl;
806 
807 	basicctrl = dwxe_read(sc, DWXE_BASIC_CTL0);
808 	basicctrl &= ~DWXE_BASIC_CTL0_SPEED_MASK;
809 
810 	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
811 	case IFM_1000_SX:
812 	case IFM_1000_LX:
813 	case IFM_1000_CX:
814 	case IFM_1000_T:
815 		basicctrl |= DWXE_BASIC_CTL0_SPEED_1000;
816 		sc->sc_link = 1;
817 		break;
818 	case IFM_100_TX:
819 		basicctrl |= DWXE_BASIC_CTL0_SPEED_100;
820 		sc->sc_link = 1;
821 		break;
822 	case IFM_10_T:
823 		basicctrl |= DWXE_BASIC_CTL0_SPEED_10;
824 		sc->sc_link = 1;
825 		break;
826 	default:
827 		sc->sc_link = 0;
828 		return;
829 	}
830 
831 	if (sc->sc_link == 0)
832 		return;
833 
834 	basicctrl &= ~DWXE_BASIC_CTL0_DUPLEX;
835 	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
836 		basicctrl |= DWXE_BASIC_CTL0_DUPLEX;
837 
838 	/* XXX: RX/TX flow control? */
839 
840 	dwxe_write(sc, DWXE_BASIC_CTL0, basicctrl);
841 }
842 
843 void
844 dwxe_tick(void *arg)
845 {
846 	struct dwxe_softc *sc = arg;
847 	int s;
848 
849 	s = splnet();
850 	mii_tick(&sc->sc_mii);
851 	splx(s);
852 
853 	timeout_add_sec(&sc->sc_tick, 1);
854 }
855 
/*
 * Rx ring reset timeout: pause Rx DMA, rebuild the ring from scratch
 * and restart DMA at the ring base.
 * NOTE(review): armed outside this chunk (sc_rxto) — presumably when
 * the Rx ring cannot be refilled; confirm against dwxe_fill_rx_ring().
 */
void
dwxe_rxtick(void *arg)
{
	struct dwxe_softc *sc = arg;
	uint32_t ctl;
	int s;

	s = splnet();

	/* Pause Rx DMA while we rewrite the ring. */
	ctl = dwxe_read(sc, DWXE_RX_CTL1);
	dwxe_write(sc, DWXE_RX_CTL1, ctl & ~DWXE_RX_CTL1_RX_DMA_EN);

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_rxring),
	    0, DWXE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	dwxe_write(sc, DWXE_RX_DESC_LIST, 0);

	/* Reset the ring indices and repopulate the descriptors. */
	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwxe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_rxring),
	    0, DWXE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Point the hardware at the ring base and resume DMA. */
	dwxe_write(sc, DWXE_RX_DESC_LIST, DWXE_DMA_DVA(sc->sc_rxring));
	dwxe_write(sc, DWXE_RX_CTL1, ctl);

	splx(s);
}
886 
887 int
888 dwxe_intr(void *arg)
889 {
890 	struct dwxe_softc *sc = arg;
891 	uint32_t reg;
892 
893 	reg = dwxe_read(sc, DWXE_INT_STA);
894 	dwxe_write(sc, DWXE_INT_STA, reg);
895 
896 	if (reg & DWXE_INT_STA_RX_INT)
897 		dwxe_rx_proc(sc);
898 
899 	if (reg & DWXE_INT_STA_TX_INT ||
900 	    reg & DWXE_INT_STA_TX_BUF_UA_INT)
901 		dwxe_tx_proc(sc);
902 
903 	return (1);
904 }
905 
/*
 * Tx completion: walk the ring from the consumer index, reclaiming
 * every descriptor the hardware has released (DESC_CTL bit clear),
 * freeing its mbuf and DMA map.  Clears the watchdog when the ring
 * drains and restarts the send queue if it was flow-blocked.
 */
void
dwxe_tx_proc(struct dwxe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwxe_desc *txd;
	struct dwxe_buf *txb;
	int idx, txfree;

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_txring), 0,
	    DWXE_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	txfree = 0;
	while (sc->sc_tx_cons != sc->sc_tx_prod) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < DWXE_NTXDESC);

		/* Stop at the first descriptor still owned by hardware. */
		txd = &sc->sc_txdesc[idx];
		if (txd->sd_status & DWXE_TX_DESC_CTL)
			break;

		txb = &sc->sc_txbuf[idx];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		txfree++;

		/* Advance consumer index with wrap-around. */
		if (sc->sc_tx_cons == (DWXE_NTXDESC - 1))
			sc->sc_tx_cons = 0;
		else
			sc->sc_tx_cons++;

		txd->sd_status = 0;
	}

	/* Ring fully drained: disarm the transmit watchdog. */
	if (sc->sc_tx_cons == sc->sc_tx_prod)
		ifp->if_timer = 0;

	if (txfree) {
		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}
}
955 
/*
 * Rx completion: harvest every completed descriptor (DESC_CTL bit
 * clear), hand the mbufs to the stack in one list, then refill the
 * ring.  if_rxr accounting drives livelock detection.
 */
void
dwxe_rx_proc(struct dwxe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwxe_desc *rxd;
	struct dwxe_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int idx, len, cnt, put;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_rxring), 0,
	    DWXE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Never consume more slots than are currently loaded. */
	cnt = if_rxr_inuse(&sc->sc_rx_ring);
	put = 0;
	while (put < cnt) {
		idx = sc->sc_rx_cons;
		KASSERT(idx < DWXE_NRXDESC);

		/* Stop at the first descriptor still owned by hardware. */
		rxd = &sc->sc_rxdesc[idx];
		if (rxd->sd_status & DWXE_RX_DESC_CTL)
			break;

		/* Frame length lives in the upper status bits. */
		len = (rxd->sd_status >> DWXE_RX_FRM_LEN_SHIFT)
		    & DWXE_RX_FRM_LEN_MASK;
		rxb = &sc->sc_rxbuf[idx];
		KASSERT(rxb->tb_m);

		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
		    len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);

		/* Strip off CRC. */
		len -= ETHER_CRC_LEN;
		KASSERT(len > 0);

		m = rxb->tb_m;
		rxb->tb_m = NULL;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		/* Advance consumer index with wrap-around. */
		put++;
		if (sc->sc_rx_cons == (DWXE_NRXDESC - 1))
			sc->sc_rx_cons = 0;
		else
			sc->sc_rx_cons++;
	}

	if_rxr_put(&sc->sc_rx_ring, put);
	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	dwxe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_rxring), 0,
	    DWXE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
1019 
/*
 * Bring the interface up: allocate and initialize the Tx and Rx
 * descriptor rings, program the DMA base addresses and station
 * address, configure media and the Rx filter, then enable DMA,
 * the MAC transmitter/receiver and interrupts.
 */
void
dwxe_up(struct dwxe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwxe_buf *txb, *rxb;
	int i;

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = dwxe_dmamem_alloc(sc,
	    DWXE_NTXDESC * sizeof(struct dwxe_desc), 8);
	sc->sc_txdesc = DWXE_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct dwxe_buf) * DWXE_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < DWXE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, DWXE_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;

		/* Chain each descriptor to the next; the last wraps to 0. */
		sc->sc_txdesc[i].sd_next =
		    DWXE_DMA_DVA(sc->sc_txring) +
		    ((i+1) % DWXE_NTXDESC) * sizeof(struct dwxe_desc);
	}

	/* Flush the descriptor chain to memory before handing it to DMA. */
	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_txring),
	    0, DWXE_DMA_LEN(sc->sc_txring), BUS_DMASYNC_PREWRITE);

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	dwxe_write(sc, DWXE_TX_DESC_LIST, DWXE_DMA_DVA(sc->sc_txring));

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = dwxe_dmamem_alloc(sc,
	    DWXE_NRXDESC * sizeof(struct dwxe_desc), 8);
	sc->sc_rxdesc = DWXE_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct dwxe_buf) * DWXE_NRXDESC,
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < DWXE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;

		/* Chain each descriptor to the next; the last wraps to 0. */
		sc->sc_rxdesc[i].sd_next =
		    DWXE_DMA_DVA(sc->sc_rxring) +
		    ((i+1) % DWXE_NRXDESC) * sizeof(struct dwxe_desc);
	}

	/* Keep at least 2 slots in reserve (watermark for if_rxr). */
	if_rxr_init(&sc->sc_rx_ring, 2, DWXE_NRXDESC);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwxe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_rxring),
	    0, DWXE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwxe_write(sc, DWXE_RX_DESC_LIST, DWXE_DMA_DVA(sc->sc_rxring));

	dwxe_lladdr_write(sc);

	//dwxe_write(sc, DWXE_BASIC_CTL1, DWXE_BASIC_CTL1_SOFT_RST);

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	dwxe_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	dwxe_write(sc, DWXE_INT_EN, DWXE_INT_EN_RX_INT |
	    DWXE_INT_EN_TX_INT | DWXE_INT_EN_TX_BUF_UA_INT);

	/* Start the DMA engines before enabling the MAC datapath. */
	dwxe_write(sc, DWXE_TX_CTL1, dwxe_read(sc, DWXE_TX_CTL1) |
	    DWXE_TX_CTL1_TX_MD | DWXE_TX_CTL1_TX_NEXT_FRM |
	    DWXE_TX_CTL1_TX_DMA_EN);
	dwxe_write(sc, DWXE_RX_CTL1, dwxe_read(sc, DWXE_RX_CTL1) |
	    DWXE_RX_CTL1_RX_MD | DWXE_RX_CTL1_RX_DMA_EN);

	dwxe_write(sc, DWXE_TX_CTL0, dwxe_read(sc, DWXE_TX_CTL0) |
	    DWXE_TX_CTL0_TX_TRANSMITTER_EN);
	dwxe_write(sc, DWXE_RX_CTL0, dwxe_read(sc, DWXE_RX_CTL0) |
	    DWXE_RX_CTL0_RX_RECEIVER_EN | DWXE_RX_CTL0_RX_DO_CRC);

	/* Kick off the once-per-second link/statistics tick. */
	timeout_add_sec(&sc->sc_tick, 1);
}
1112 
/*
 * Bring the interface down: quiesce DMA and the MAC, mask interrupts,
 * wait for in-flight interrupt handlers and queued transmits to drain,
 * then tear down and free both descriptor rings and their buffers.
 */
void
dwxe_down(struct dwxe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwxe_buf *txb, *rxb;
	uint32_t dmactrl;
	int i;

	timeout_del(&sc->sc_rxto);
	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	dwxe_stop_dma(sc);

	/* Disable the MAC transmitter and receiver. */
	dwxe_write(sc, DWXE_TX_CTL0, dwxe_read(sc,
	    DWXE_TX_CTL0) & ~DWXE_TX_CTL0_TX_TRANSMITTER_EN);

	dwxe_write(sc, DWXE_RX_CTL0, dwxe_read(sc,
	    DWXE_RX_CTL0) & ~DWXE_RX_CTL0_RX_RECEIVER_EN);

	/* Stop both DMA engines. */
	dmactrl = dwxe_read(sc, DWXE_TX_CTL1);
	dmactrl &= ~DWXE_TX_CTL1_TX_DMA_EN;
	dwxe_write(sc, DWXE_TX_CTL1, dmactrl);

	dmactrl = dwxe_read(sc, DWXE_RX_CTL1);
	dmactrl &= ~DWXE_RX_CTL1_RX_DMA_EN;
	dwxe_write(sc, DWXE_RX_CTL1, dmactrl);

	/* Mask all interrupts. */
	dwxe_write(sc, DWXE_INT_EN, 0);

	/*
	 * Wait for any interrupt handler still running and any transmit
	 * still queued to finish before freeing the rings underneath them.
	 */
	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	for (i = 0; i < DWXE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	dwxe_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < DWXE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	dwxe_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);
}
1177 
/* Reverse the bit order of a 32-bit word (bit 0 <-> bit 31, etc.). */
static uint32_t
bitrev32(uint32_t x)
{
	uint32_t r;
	int i;

	r = 0;
	for (i = 0; i < 32; i++) {
		r = (r << 1) | (x & 1);
		x >>= 1;
	}

	return r;
}
1189 
/*
 * Program the receive filter: promiscuous / all-multicast modes, or a
 * 64-bit multicast hash computed from the bit-reversed CRC32 of each
 * group address.
 */
void
dwxe_iff(struct dwxe_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hash[2], hashbit, hashreg;
	uint32_t reg;

	reg = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;
	bzero(hash, sizeof(hash));
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Ranges can't be hashed; fall back to all-multicast. */
		ifp->if_flags |= IFF_ALLMULTI;
		reg |= DWXE_RX_FRM_FLT_RX_ALL_MULTICAST;
		if (ifp->if_flags & IFF_PROMISC)
			reg |= DWXE_RX_FRM_FLT_DIS_ADDR_FILTER;
	} else {
		reg |= DWXE_RX_FRM_FLT_HASH_MULTICAST;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/*
			 * Derive a 6-bit table index: mask the CRC to 7
			 * bits, then take the top 6 bits of the
			 * bit-reversed complement.  NOTE(review): the
			 * exact masking order looks unusual — confirm
			 * against the H3/A64 EMAC manual.
			 */
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & 0x7f;

			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	dwxe_lladdr_write(sc);

	/*
	 * NOTE(review): hash words are intentionally swapped here
	 * (HASH0 <- hash[1]); presumably HASH0 holds the upper half of
	 * the 64-bit table — verify against the hardware documentation.
	 */
	dwxe_write(sc, DWXE_RX_HASH0, hash[1]);
	dwxe_write(sc, DWXE_RX_HASH1, hash[0]);

	dwxe_write(sc, DWXE_RX_FRM_FLT, reg);
}
1232 
/*
 * Map an mbuf (chain) onto the Tx descriptor ring starting at *idx.
 * On success, *idx is advanced past the descriptors consumed and
 * *used is incremented by the number of DMA segments.  Returns EFBIG
 * if the mbuf cannot be mapped even after defragmentation.
 */
int
dwxe_encap(struct dwxe_softc *sc, struct mbuf *m, int *idx, int *used)
{
	struct dwxe_desc *txd, *txd_start;
	bus_dmamap_t map;
	int cur, frag, i;

	cur = frag = *idx;
	map = sc->sc_txbuf[cur].tb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
		/* Too many segments: compact into one cluster and retry. */
		if (m_defrag(m, M_DONTWAIT))
			return (EFBIG);
		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
			return (EFBIG);
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Fill one descriptor per DMA segment. */
	txd = txd_start = &sc->sc_txdesc[frag];
	for (i = 0; i < map->dm_nsegs; i++) {
		txd->sd_addr = map->dm_segs[i].ds_addr;
		txd->sd_len = map->dm_segs[i].ds_len;
		if (i == 0)
			txd->sd_len |= DWXE_TX_FIR_DESC;
		if (i == (map->dm_nsegs - 1))
			txd->sd_len |= DWXE_TX_LAST_DESC | DWXE_TX_INT_CTL;
		/*
		 * Hand every descriptor except the first to the
		 * hardware now; the first is flipped below, last, so
		 * the DMA engine never sees a half-built chain.
		 */
		if (i != 0)
			txd->sd_status = DWXE_TX_DESC_CTL;

		bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_txring),
		    frag * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

		cur = frag;
		if (frag == (DWXE_NTXDESC - 1)) {
			txd = &sc->sc_txdesc[0];
			frag = 0;
		} else {
			txd++;
			frag++;
		}
		/* The ring must never catch up with the consumer. */
		KASSERT(frag != sc->sc_tx_cons);
	}

	/* Now give the first descriptor to the hardware. */
	txd_start->sd_status = DWXE_TX_DESC_CTL;
	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_txring),
	    *idx * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

	/*
	 * The map was loaded on slot *idx, but completion is signalled
	 * on the last descriptor (cur): swap the maps so the loaded map
	 * and the mbuf live in the same slot for dwxe_tx_proc/teardown.
	 */
	KASSERT(sc->sc_txbuf[cur].tb_m == NULL);
	sc->sc_txbuf[*idx].tb_map = sc->sc_txbuf[cur].tb_map;
	sc->sc_txbuf[cur].tb_map = map;
	sc->sc_txbuf[cur].tb_m = m;

	*idx = frag;
	*used += map->dm_nsegs;

	return (0);
}
1293 
1294 void
1295 dwxe_reset(struct dwxe_softc *sc)
1296 {
1297 	int n;
1298 
1299 	dwxe_stop_dma(sc);
1300 
1301 	dwxe_write(sc, DWXE_BASIC_CTL1, DWXE_BASIC_CTL1_SOFT_RST);
1302 
1303 	for (n = 0; n < 1000; n++) {
1304 		if ((dwxe_read(sc, DWXE_BASIC_CTL1) &
1305 		    DWXE_BASIC_CTL1_SOFT_RST) == 0)
1306 			return;
1307 		delay(10);
1308 	}
1309 
1310 	printf("%s: reset timeout\n", sc->sc_dev.dv_xname);
1311 }
1312 
1313 void
1314 dwxe_stop_dma(struct dwxe_softc *sc)
1315 {
1316 	uint32_t dmactrl;
1317 
1318 	/* Stop DMA. */
1319 	dmactrl = dwxe_read(sc, DWXE_TX_CTL1);
1320 	dmactrl &= ~DWXE_TX_CTL1_TX_DMA_EN;
1321 	dmactrl |= DWXE_TX_CTL1_TX_FIFO_FLUSH;
1322 	dwxe_write(sc, DWXE_TX_CTL1, dmactrl);
1323 }
1324 
1325 struct dwxe_dmamem *
1326 dwxe_dmamem_alloc(struct dwxe_softc *sc, bus_size_t size, bus_size_t align)
1327 {
1328 	struct dwxe_dmamem *tdm;
1329 	int nsegs;
1330 
1331 	tdm = malloc(sizeof(*tdm), M_DEVBUF, M_WAITOK | M_ZERO);
1332 	tdm->tdm_size = size;
1333 
1334 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1335 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
1336 		goto tdmfree;
1337 
1338 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &tdm->tdm_seg, 1,
1339 	    &nsegs, BUS_DMA_WAITOK) != 0)
1340 		goto destroy;
1341 
1342 	if (bus_dmamem_map(sc->sc_dmat, &tdm->tdm_seg, nsegs, size,
1343 	    &tdm->tdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
1344 		goto free;
1345 
1346 	if (bus_dmamap_load(sc->sc_dmat, tdm->tdm_map, tdm->tdm_kva, size,
1347 	    NULL, BUS_DMA_WAITOK) != 0)
1348 		goto unmap;
1349 
1350 	bzero(tdm->tdm_kva, size);
1351 
1352 	return (tdm);
1353 
1354 unmap:
1355 	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, size);
1356 free:
1357 	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
1358 destroy:
1359 	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
1360 tdmfree:
1361 	free(tdm, M_DEVBUF, 0);
1362 
1363 	return (NULL);
1364 }
1365 
/*
 * Release a region obtained from dwxe_dmamem_alloc(), undoing each
 * setup step in reverse order.
 */
void
dwxe_dmamem_free(struct dwxe_softc *sc, struct dwxe_dmamem *tdm)
{
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, tdm->tdm_size);
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
	free(tdm, M_DEVBUF, 0);
}
1374 
1375 struct mbuf *
1376 dwxe_alloc_mbuf(struct dwxe_softc *sc, bus_dmamap_t map)
1377 {
1378 	struct mbuf *m = NULL;
1379 
1380 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1381 	if (!m)
1382 		return (NULL);
1383 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1384 	m_adj(m, ETHER_ALIGN);
1385 
1386 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1387 		printf("%s: could not load mbuf DMA map", DEVNAME(sc));
1388 		m_freem(m);
1389 		return (NULL);
1390 	}
1391 
1392 	bus_dmamap_sync(sc->sc_dmat, map, 0,
1393 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1394 
1395 	return (m);
1396 }
1397 
1398 void
1399 dwxe_fill_rx_ring(struct dwxe_softc *sc)
1400 {
1401 	struct dwxe_desc *rxd;
1402 	struct dwxe_buf *rxb;
1403 	u_int slots;
1404 
1405 	for (slots = if_rxr_get(&sc->sc_rx_ring, DWXE_NRXDESC);
1406 	    slots > 0; slots--) {
1407 		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
1408 		rxb->tb_m = dwxe_alloc_mbuf(sc, rxb->tb_map);
1409 		if (rxb->tb_m == NULL)
1410 			break;
1411 
1412 		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
1413 		rxd->sd_len = rxb->tb_map->dm_segs[0].ds_len - 1;
1414 		rxd->sd_addr = rxb->tb_map->dm_segs[0].ds_addr;
1415 		rxd->sd_status = DWXE_RX_DESC_CTL;
1416 
1417 		if (sc->sc_rx_prod == (DWXE_NRXDESC - 1))
1418 			sc->sc_rx_prod = 0;
1419 		else
1420 			sc->sc_rx_prod++;
1421 	}
1422 	if_rxr_put(&sc->sc_rx_ring, slots);
1423 
1424 	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
1425 		timeout_add(&sc->sc_rxto, 1);
1426 }
1427