xref: /openbsd-src/sys/dev/fdt/if_dwxe.c (revision c1a45aed656e7d5627c30c92421893a76f370ccb)
1 /*	$OpenBSD: if_dwxe.c,v 1.20 2022/01/08 00:20:10 jmatthew Exp $	*/
2 /*
3  * Copyright (c) 2008 Mark Kettenis
4  * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for the ethernet controller on the Allwinner H3/A64 SoCs.
21  */
22 
23 #include "bpfilter.h"
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/device.h>
28 #include <sys/kernel.h>
29 #include <sys/malloc.h>
30 #include <sys/mbuf.h>
31 #include <sys/queue.h>
32 #include <sys/socket.h>
33 #include <sys/sockio.h>
34 #include <sys/timeout.h>
35 
36 #include <machine/bus.h>
37 #include <machine/fdt.h>
38 
39 #include <net/if.h>
40 #include <net/if_media.h>
41 
42 #include <dev/ofw/openfirm.h>
43 #include <dev/ofw/ofw_clock.h>
44 #include <dev/ofw/ofw_misc.h>
45 #include <dev/ofw/ofw_pinctrl.h>
46 #include <dev/ofw/ofw_regulator.h>
47 #include <dev/ofw/fdt.h>
48 
49 #include <dev/mii/mii.h>
50 #include <dev/mii/miivar.h>
51 
52 #if NBPFILTER > 0
53 #include <net/bpf.h>
54 #endif
55 
56 #include <netinet/in.h>
57 #include <netinet/if_ether.h>
58 
59 /*
60  * DWXE registers.
61  */
62 
63 #define DWXE_BASIC_CTL0		0x00
64 #define  DWXE_BASIC_CTL0_DUPLEX		(1 << 0)
65 #define  DWXE_BASIC_CTL0_LOOPBACK		(1 << 1)
66 #define  DWXE_BASIC_CTL0_SPEED_1000		(0 << 2)
67 #define  DWXE_BASIC_CTL0_SPEED_10		(2 << 2)
68 #define  DWXE_BASIC_CTL0_SPEED_100		(3 << 2)
69 #define  DWXE_BASIC_CTL0_SPEED_MASK		(3 << 2)
70 #define DWXE_BASIC_CTL1		0x04
71 #define  DWXE_BASIC_CTL1_SOFT_RST		(1 << 0)
72 #define  DWXE_BASIC_CTL1_RX_TX_PRI		(1 << 1)
73 #define  DWXE_BASIC_CTL1_BURST_LEN_MASK	(0x3f << 24)
74 #define  DWXE_BASIC_CTL1_BURST_LEN(x)		((x) << 24)
75 #define DWXE_INT_STA			0x08
76 #define  DWXE_INT_STA_TX_INT			(1 << 0)
77 #define  DWXE_INT_STA_TX_DMA_STOP_INT		(1 << 1)
78 #define  DWXE_INT_STA_TX_BUF_UA_INT		(1 << 2)
79 #define  DWXE_INT_STA_TX_TIMEOUT_INT		(1 << 3)
80 #define  DWXE_INT_STA_TX_UNDERFLOW_INT	(1 << 4)
81 #define  DWXE_INT_STA_TX_EARLY_INT		(1 << 5)
82 #define  DWXE_INT_STA_RX_INT			(1 << 8)
83 #define  DWXE_INT_STA_RX_BUF_UA_INT		(1 << 9)
84 #define  DWXE_INT_STA_RX_DMA_STOP_INT		(1 << 10)
85 #define  DWXE_INT_STA_RX_TIMEOUT_INT		(1 << 11)
86 #define  DWXE_INT_STA_RX_OVERFLOW_INT		(1 << 12)
87 #define  DWXE_INT_STA_RX_EARLY_INT		(1 << 13)
88 #define  DWXE_INT_STA_RGMII_STA_INT		(1 << 16)
89 #define DWXE_INT_EN			0x0C
90 #define  DWXE_INT_EN_TX_INT			(1 << 0)
91 #define  DWXE_INT_EN_TX_DMA_STOP_INT		(1 << 1)
92 #define  DWXE_INT_EN_TX_BUF_UA_INT		(1 << 2)
93 #define  DWXE_INT_EN_TX_TIMEOUT_INT		(1 << 3)
94 #define  DWXE_INT_EN_TX_UNDERFLOW_INT		(1 << 4)
95 #define  DWXE_INT_EN_TX_EARLY_INT		(1 << 5)
96 #define  DWXE_INT_EN_RX_INT			(1 << 8)
97 #define  DWXE_INT_EN_RX_BUF_UA_INT		(1 << 9)
98 #define  DWXE_INT_EN_RX_DMA_STOP_INT		(1 << 10)
99 #define  DWXE_INT_EN_RX_TIMEOUT_INT		(1 << 11)
100 #define  DWXE_INT_EN_RX_OVERFLOW_INT		(1 << 12)
101 #define  DWXE_INT_EN_RX_EARLY_INT		(1 << 13)
102 #define  DWXE_INT_EN_RGMII_EN_INT		(1 << 16)
103 #define DWXE_TX_CTL0			0x10
104 #define  DWXE_TX_CTL0_TX_TRANSMITTER_EN	(1U << 31)
105 #define DWXE_TX_CTL1			0x14
106 #define  DWXE_TX_CTL1_TX_FIFO_FLUSH		(1 << 0)
107 #define  DWXE_TX_CTL1_TX_MD			(1 << 1)
108 #define  DWXE_TX_CTL1_TX_NEXT_FRM		(1 << 2)
109 #define  DWXE_TX_CTL1_TX_TH_MASK		(0x3 << 8)
110 #define  DWXE_TX_CTL1_TX_TH_64		0
111 #define  DWXE_TX_CTL1_TX_TH_128		(0x1 << 8)
112 #define  DWXE_TX_CTL1_TX_TH_192		(0x2 << 8)
113 #define  DWXE_TX_CTL1_TX_TH_256		(0x3 << 8)
114 #define  DWXE_TX_CTL1_TX_DMA_EN		(1 << 30)
115 #define  DWXE_TX_CTL1_TX_DMA_START		(1U << 31)
116 #define DWXE_TX_FLOW_CTL		0x1C
117 #define  DWXE_TX_FLOW_CTL_EN			(1 << 0)
118 #define DWXE_TX_DESC_LIST		0x20
119 #define DWXE_RX_CTL0			0x24
120 #define  DWXE_RX_CTL0_RX_FLOW_CTL_EN		(1 << 16)
121 #define  DWXE_RX_CTL0_RX_DO_CRC		(1 << 27)
122 #define  DWXE_RX_CTL0_RX_RECEIVER_EN		(1U << 31)
123 #define DWXE_RX_CTL1			0x28
124 #define  DWXE_RX_CTL1_RX_MD			(1 << 1)
125 #define  DWXE_RX_CTL1_RX_TH_MASK		(0x3 << 4)
126 #define  DWXE_RX_CTL1_RX_TH_32		(0x0 << 4)
127 #define  DWXE_RX_CTL1_RX_TH_64		(0x1 << 4)
128 #define  DWXE_RX_CTL1_RX_TH_96		(0x2 << 4)
129 #define  DWXE_RX_CTL1_RX_TH_128		(0x3 << 4)
130 #define  DWXE_RX_CTL1_RX_DMA_EN		(1 << 30)
131 #define  DWXE_RX_CTL1_RX_DMA_START		(1U << 31)
132 #define DWXE_RX_DESC_LIST		0x34
133 #define DWXE_RX_FRM_FLT		0x38
134 #define DWXE_RX_FRM_FLT_RX_ALL		(1 << 0)
135 #define DWXE_RX_FRM_FLT_HASH_UNICAST		(1 << 8)
136 #define DWXE_RX_FRM_FLT_HASH_MULTICAST	(1 << 9)
137 #define DWXE_RX_FRM_FLT_CTL			(1 << 13)
138 #define DWXE_RX_FRM_FLT_RX_ALL_MULTICAST	(1 << 16)
139 #define DWXE_RX_FRM_FLT_DIS_BROADCAST		(1 << 17)
140 #define DWXE_RX_FRM_FLT_DIS_ADDR_FILTER	(1U << 31)
141 #define DWXE_RX_HASH0			0x40
142 #define DWXE_RX_HASH1			0x44
143 #define DWXE_MDIO_CMD			0x48
144 #define  DWXE_MDIO_CMD_MII_BUSY		(1 << 0)
145 #define  DWXE_MDIO_CMD_MII_WRITE		(1 << 1)
146 #define  DWXE_MDIO_CMD_PHY_REG_SHIFT		4
147 #define  DWXE_MDIO_CMD_PHY_ADDR_SHIFT		12
148 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_SHIFT	20
149 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_MASK	0x7
150 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_16	0
151 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_32	1
152 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_64	2
153 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_128	3
154 #define DWXE_MDIO_DATA		0x4C
155 #define DWXE_MACADDR_HI		0x50
156 #define DWXE_MACADDR_LO		0x54
157 #define DWXE_TX_DMA_STA		0xB0
158 #define DWXE_TX_CUR_DESC		0xB4
159 #define DWXE_TX_CUR_BUF		0xB8
160 #define DWXE_RX_DMA_STA		0xC0
161 #define DWXE_RX_CUR_DESC		0xC4
162 #define DWXE_RX_CUR_BUF		0xC8
163 
164 /*
165  * DWXE descriptors.
166  */
167 
/*
 * Hardware DMA descriptor, shared between the driver and the MAC.
 * The layout is dictated by the hardware; descriptors are chained
 * into a ring via sd_next.
 */
struct dwxe_desc {
	uint32_t sd_status;	/* DWXE_TX_*/DWXE_RX_* status; bit 31 = hw owns */
	uint32_t sd_len;	/* buffer size plus per-frame control bits */
	uint32_t sd_addr;	/* DMA address of the data buffer */
	uint32_t sd_next;	/* DMA address of the next descriptor */
};
174 
175 /* Tx status bits. */
176 #define DWXE_TX_DEFER			(1 << 0)
177 #define DWXE_TX_UNDERFLOW_ERR		(1 << 1)
178 #define DWXE_TX_DEFER_ERR		(1 << 2)
179 #define DWXE_TX_COL_CNT_MASK		(0xf << 3)
180 #define DWXE_TX_COL_CNT_SHIFT		3
181 #define DWXE_TX_COL_ERR_1		(1 << 8)
182 #define DWXE_TX_COL_ERR_0		(1 << 9)
183 #define DWXE_TX_CRS_ERR		(1 << 10)
184 #define DWXE_TX_PAYLOAD_ERR		(1 << 12)
185 #define DWXE_TX_LENGTH_ERR		(1 << 14)
186 #define DWXE_TX_HEADER_ERR		(1 << 16)
187 #define DWXE_TX_DESC_CTL		(1 << 31)
188 
189 /* Rx status bits */
190 #define DWXE_RX_PAYLOAD_ERR		(1 << 0)
191 #define DWXE_RX_CRC_ERR		(1 << 1)
192 #define DWXE_RX_PHY_ERR		(1 << 3)
193 #define DWXE_RX_LENGTH_ERR		(1 << 4)
194 #define DWXE_RX_FRM_TYPE		(1 << 5)
195 #define DWXE_RX_COL_ERR		(1 << 6)
196 #define DWXE_RX_HEADER_ERR		(1 << 7)
197 #define DWXE_RX_LAST_DESC		(1 << 8)
198 #define DWXE_RX_FIR_DESC		(1 << 9)
199 #define DWXE_RX_OVERFLOW_ERR		(1 << 11)
200 #define DWXE_RX_SAF_FAIL		(1 << 13)
201 #define DWXE_RX_NO_ENOUGH_BUF_ERR	(1 << 14)
202 #define DWXE_RX_FRM_LEN_MASK		0x3fff
203 #define DWXE_RX_FRM_LEN_SHIFT		16
204 #define DWXE_RX_DAF_FAIL		(1 << 30)
205 #define DWXE_RX_DESC_CTL		(1 << 31)
206 
207 /* Tx size bits */
208 #define DWXE_TX_BUF_SIZE		(0xfff << 0)
209 #define DWXE_TX_CRC_CTL		(1 << 26)
210 #define DWXE_TX_CHECKSUM_CTL_MASK	(0x3 << 27)
211 #define DWXE_TX_CHECKSUM_CTL_IP	(1 << 27)
212 #define DWXE_TX_CHECKSUM_CTL_NO_PSE	(2 << 27)
213 #define DWXE_TX_CHECKSUM_CTL_FULL	(3 << 27)
214 #define DWXE_TX_FIR_DESC		(1 << 29)
215 #define DWXE_TX_LAST_DESC		(1 << 30)
216 #define DWXE_TX_INT_CTL		(1 << 31)
217 
218 /* Rx size bits */
219 #define DWXE_RX_BUF_SIZE		(0xfff << 0)
220 #define DWXE_RX_INT_CTL		(1 << 31)
221 
222 /* EMAC syscon bits */
223 #define SYSCON_EMAC			0x30
224 #define SYSCON_ETCS_MASK		(0x3 << 0)
225 #define SYSCON_ETCS_MII			(0 << 0)
226 #define SYSCON_ETCS_EXT_GMII		(1 << 0)
227 #define SYSCON_ETCS_INT_GMII		(2 << 0)
228 #define SYSCON_EPIT			(1 << 2) /* 1: RGMII, 0: MII */
229 #define SYSCON_ERXDC_MASK		(0xf << 5)
230 #define SYSCON_ERXDC_SHIFT		5
231 #define SYSCON_ETXDC_MASK		(0x7 << 10)
232 #define SYSCON_ETXDC_SHIFT		10
233 #define SYSCON_RMII_EN			(1 << 13) /* 1: enable RMII (overrides EPIT) */
234 #define SYSCON_H3_EPHY_SELECT		(1 << 15) /* 1: internal PHY, 0: external PHY */
235 #define SYSCON_H3_EPHY_SHUTDOWN		(1 << 16) /* 1: shutdown, 0: power up */
236 #define SYSCON_H3_EPHY_LED_POL		(1 << 17) /* 1: active low, 0: active high */
237 #define SYSCON_H3_EPHY_CLK_SEL		(1 << 18) /* 1: 24MHz, 0: 25MHz */
238 #define SYSCON_H3_EPHY_ADDR_MASK	(0x1f << 20)
239 #define SYSCON_H3_EPHY_ADDR_SHIFT	20
240 
241 /* GMAC syscon bits (Allwinner R40) */
242 #define SYSCON_GMAC			0x00
243 #define SYSCON_GTCS_MASK		SYSCON_ETCS_MASK
244 #define SYSCON_GTCS_MII			SYSCON_ETCS_MII
245 #define SYSCON_GTCS_EXT_GMII		SYSCON_ETCS_EXT_GMII
246 #define SYSCON_GTCS_INT_GMII		SYSCON_ETCS_INT_GMII
247 #define SYSCON_GPIT			SYSCON_EPIT
248 #define SYSCON_GRXDC_MASK		(0x7 << 5)
249 #define SYSCON_GRXDC_SHIFT		5
250 
/*
 * Per-descriptor software state: the DMA map backing the slot and the
 * mbuf currently attached to it (NULL when the slot is free).
 */
struct dwxe_buf {
	bus_dmamap_t	tb_map;
	struct mbuf	*tb_m;
};
255 
256 #define DWXE_NTXDESC	256
257 #define DWXE_NTXSEGS	16
258 
259 #define DWXE_NRXDESC	256
260 
/*
 * A single contiguous DMA-safe allocation (used for the Tx/Rx
 * descriptor rings): map, backing segment, size and kernel mapping.
 * Access it through the DWXE_DMA_* macros below.
 */
struct dwxe_dmamem {
	bus_dmamap_t		tdm_map;
	bus_dma_segment_t	tdm_seg;
	size_t			tdm_size;
	caddr_t			tdm_kva;
};
267 #define DWXE_DMA_MAP(_tdm)	((_tdm)->tdm_map)
268 #define DWXE_DMA_LEN(_tdm)	((_tdm)->tdm_size)
269 #define DWXE_DMA_DVA(_tdm)	((_tdm)->tdm_map->dm_segs[0].ds_addr)
270 #define DWXE_DMA_KVA(_tdm)	((void *)(_tdm)->tdm_kva)
271 
/* Per-device driver state. */
struct dwxe_softc {
	struct device		sc_dev;
	int			sc_node;	/* FDT node of this device */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;
	void			*sc_ih;		/* interrupt handle */

	struct arpcom		sc_ac;
#define sc_lladdr	sc_ac.ac_enaddr
	struct mii_data		sc_mii;
#define sc_media	sc_mii.mii_media
	int			sc_link;	/* nonzero when link is up */
	int			sc_phyloc;	/* PHY address, or MII_PHY_ANY */

	/* Tx ring: producer/consumer indices into sc_txdesc[]. */
	struct dwxe_dmamem	*sc_txring;
	struct dwxe_buf		*sc_txbuf;
	struct dwxe_desc	*sc_txdesc;
	int			sc_tx_prod;
	int			sc_tx_cons;

	/* Rx ring, replenished via sc_rx_ring accounting. */
	struct dwxe_dmamem	*sc_rxring;
	struct dwxe_buf		*sc_rxbuf;
	struct dwxe_desc	*sc_rxdesc;
	int			sc_rx_prod;
	struct if_rxring	sc_rx_ring;
	int			sc_rx_cons;

	struct timeout		sc_tick;	/* 1 Hz MII poll */
	struct timeout		sc_rxto;	/* Rx ring recovery timeout */

	uint32_t		sc_clk;		/* MDC divider selection */
};
305 
306 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
307 
308 int	dwxe_match(struct device *, void *, void *);
309 void	dwxe_attach(struct device *, struct device *, void *);
310 void	dwxe_phy_setup_emac(struct dwxe_softc *);
311 void	dwxe_phy_setup_gmac(struct dwxe_softc *);
312 
/* Autoconf glue: softc size plus match/attach entry points. */
const struct cfattach dwxe_ca = {
	sizeof(struct dwxe_softc), dwxe_match, dwxe_attach
};
316 
/* Driver definition: network interface devices named "dwxe". */
struct cfdriver dwxe_cd = {
	NULL, "dwxe", DV_IFNET
};
320 
321 uint32_t dwxe_read(struct dwxe_softc *, bus_addr_t);
322 void	dwxe_write(struct dwxe_softc *, bus_addr_t, uint32_t);
323 
324 int	dwxe_ioctl(struct ifnet *, u_long, caddr_t);
325 void	dwxe_start(struct ifqueue *);
326 void	dwxe_watchdog(struct ifnet *);
327 
328 int	dwxe_media_change(struct ifnet *);
329 void	dwxe_media_status(struct ifnet *, struct ifmediareq *);
330 
331 int	dwxe_mii_readreg(struct device *, int, int);
332 void	dwxe_mii_writereg(struct device *, int, int, int);
333 void	dwxe_mii_statchg(struct device *);
334 
335 void	dwxe_lladdr_read(struct dwxe_softc *, uint8_t *);
336 void	dwxe_lladdr_write(struct dwxe_softc *);
337 
338 void	dwxe_tick(void *);
339 void	dwxe_rxtick(void *);
340 
341 int	dwxe_intr(void *);
342 void	dwxe_tx_proc(struct dwxe_softc *);
343 void	dwxe_rx_proc(struct dwxe_softc *);
344 
345 void	dwxe_up(struct dwxe_softc *);
346 void	dwxe_down(struct dwxe_softc *);
347 void	dwxe_iff(struct dwxe_softc *);
348 int	dwxe_encap(struct dwxe_softc *, struct mbuf *, int *, int *);
349 
350 void	dwxe_reset(struct dwxe_softc *);
351 void	dwxe_stop_dma(struct dwxe_softc *);
352 
353 struct dwxe_dmamem *
354 	dwxe_dmamem_alloc(struct dwxe_softc *, bus_size_t, bus_size_t);
355 void	dwxe_dmamem_free(struct dwxe_softc *, struct dwxe_dmamem *);
356 struct mbuf *dwxe_alloc_mbuf(struct dwxe_softc *, bus_dmamap_t);
357 void	dwxe_fill_rx_ring(struct dwxe_softc *);
358 
359 int
360 dwxe_match(struct device *parent, void *cfdata, void *aux)
361 {
362 	struct fdt_attach_args *faa = aux;
363 
364 	return OF_is_compatible(faa->fa_node, "allwinner,sun8i-h3-emac") ||
365 	    OF_is_compatible(faa->fa_node, "allwinner,sun8i-r40-gmac") ||
366 	    OF_is_compatible(faa->fa_node, "allwinner,sun50i-a64-emac");
367 }
368 
/*
 * Attach: map registers, enable clocks and PHY power, determine the
 * MDC divider and MAC address, hook the interface into the network
 * stack, attach the MII layer and establish the interrupt.
 */
void
dwxe_attach(struct device *parent, struct device *self, void *aux)
{
	struct dwxe_softc *sc = (void *)self;
	struct fdt_attach_args *faa = aux;
	struct ifnet *ifp;
	uint32_t phy, phy_supply;
	int node;

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}
	sc->sc_dmat = faa->fa_dmat;

	/* Lookup PHY. */
	phy = OF_getpropint(faa->fa_node, "phy-handle", 0);
	node = OF_getnodebyphandle(phy);
	if (node)
		sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
	else
		sc->sc_phyloc = MII_PHY_ANY;

	pinctrl_byname(faa->fa_node, "default");

	/* Enable clock. */
	clock_enable(faa->fa_node, "stmmaceth");
	reset_deassert(faa->fa_node, "stmmaceth");
	delay(5000);

	/* Power up PHY. */
	phy_supply = OF_getpropint(faa->fa_node, "phy-supply", 0);
	if (phy_supply)
		regulator_enable(phy_supply);

	/* Pick an MDC divider based on the module clock frequency. */
	sc->sc_clk = clock_get_frequency(faa->fa_node, "stmmaceth");
	if (sc->sc_clk > 160000000)
		sc->sc_clk = DWXE_MDIO_CMD_MDC_DIV_RATIO_M_128;
	else if (sc->sc_clk > 80000000)
		sc->sc_clk = DWXE_MDIO_CMD_MDC_DIV_RATIO_M_64;
	else if (sc->sc_clk > 40000000)
		sc->sc_clk = DWXE_MDIO_CMD_MDC_DIV_RATIO_M_32;
	else
		sc->sc_clk = DWXE_MDIO_CMD_MDC_DIV_RATIO_M_16;

	/* Prefer the device tree MAC address, fall back to hardware. */
	if (OF_getprop(faa->fa_node, "local-mac-address",
	    &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
		dwxe_lladdr_read(sc, sc->sc_lladdr);
	printf(": address %s\n", ether_sprintf(sc->sc_lladdr));

	/* Do hardware specific initializations. */
	if (OF_is_compatible(faa->fa_node, "allwinner,sun8i-r40-gmac"))
		dwxe_phy_setup_gmac(sc);
	else
		dwxe_phy_setup_emac(sc);

	timeout_set(&sc->sc_tick, dwxe_tick, sc);
	timeout_set(&sc->sc_rxto, dwxe_rxtick, sc);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = dwxe_ioctl;
	ifp->if_qstart = dwxe_start;
	ifp->if_watchdog = dwxe_watchdog;
	ifq_set_maxlen(&ifp->if_snd, DWXE_NTXDESC - 1);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = dwxe_mii_readreg;
	sc->sc_mii.mii_writereg = dwxe_mii_writereg;
	sc->sc_mii.mii_statchg = dwxe_mii_statchg;

	ifmedia_init(&sc->sc_media, 0, dwxe_media_change, dwxe_media_status);

	dwxe_reset(sc);

	mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
	    MII_OFFSET_ANY, MIIF_NOISOLATE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY: offer only a manual media option. */
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_NET | IPL_MPSAFE,
	    dwxe_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL)
		printf("%s: can't establish interrupt\n", sc->sc_dev.dv_xname);
}
469 
/*
 * Program the EMAC syscon register (H3/A64): select the PHY
 * interface mode (RGMII/RMII/internal MII PHY) and the Tx/Rx clock
 * delays, then reset the MAC.
 */
void
dwxe_phy_setup_emac(struct dwxe_softc *sc)
{
	struct regmap *rm;
	uint32_t syscon;
	uint32_t tx_delay, rx_delay;
	char *phy_mode;
	int len;

	rm = regmap_byphandle(OF_getpropint(sc->sc_node, "syscon", 0));
	if (rm == NULL)
		return;

	/* Start from the current value with all mode bits cleared. */
	syscon = regmap_read_4(rm, SYSCON_EMAC);
	syscon &= ~(SYSCON_ETCS_MASK|SYSCON_EPIT|SYSCON_RMII_EN);
	syscon &= ~(SYSCON_ETXDC_MASK | SYSCON_ERXDC_MASK);
	syscon &= ~SYSCON_H3_EPHY_SELECT;

	/* Without a phy-mode property leave the syscon untouched. */
	if ((len = OF_getproplen(sc->sc_node, "phy-mode")) <= 0)
		return;
	phy_mode = malloc(len, M_TEMP, M_WAITOK);
	OF_getprop(sc->sc_node, "phy-mode", phy_mode, len);
	/* "rgmii" must be tested before "mii" since strncmp is a prefix match. */
	if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
		syscon |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
	else if (!strncmp(phy_mode, "rmii", strlen("rmii")))
		/* NOTE(review): RMII does not set SYSCON_RMII_EN here — confirm. */
		syscon |= SYSCON_EPIT | SYSCON_ETCS_EXT_GMII;
	else if (!strncmp(phy_mode, "mii", strlen("mii")) &&
	    OF_is_compatible(sc->sc_node, "allwinner,sun8i-h3-emac")) {
		/* H3 internal PHY: power it up and select it. */
		syscon &= ~SYSCON_H3_EPHY_SHUTDOWN;
		syscon |= SYSCON_H3_EPHY_SELECT | SYSCON_H3_EPHY_CLK_SEL;
		if (OF_getproplen(sc->sc_node, "allwinner,leds-active-low") == 0)
			syscon |= SYSCON_H3_EPHY_LED_POL;
		else
			syscon &= ~SYSCON_H3_EPHY_LED_POL;
		syscon &= ~SYSCON_H3_EPHY_ADDR_MASK;
		syscon |= (sc->sc_phyloc << SYSCON_H3_EPHY_ADDR_SHIFT);
	}
	free(phy_mode, M_TEMP, len);

	/* Delays are given in picoseconds; hardware steps are 100ps. */
	tx_delay = OF_getpropint(sc->sc_node, "allwinner,tx-delay-ps", 0);
	rx_delay = OF_getpropint(sc->sc_node, "allwinner,rx-delay-ps", 0);
	syscon |= ((tx_delay / 100) << SYSCON_ETXDC_SHIFT) & SYSCON_ETXDC_MASK;
	syscon |= ((rx_delay / 100) << SYSCON_ERXDC_SHIFT) & SYSCON_ERXDC_MASK;

	regmap_write_4(rm, SYSCON_EMAC, syscon);
	dwxe_reset(sc);
}
517 
/*
 * Program the GMAC syscon register (R40): select the PHY interface
 * mode and the Rx clock delay, then reset the MAC.
 */
void
dwxe_phy_setup_gmac(struct dwxe_softc *sc)
{
	struct regmap *rm;
	uint32_t syscon;
	uint32_t rx_delay;
	char *phy_mode;
	int len;

	rm = regmap_byphandle(OF_getpropint(sc->sc_node, "syscon", 0));
	if (rm == NULL)
		return;

	syscon = regmap_read_4(rm, SYSCON_GMAC);
	syscon &= ~(SYSCON_GTCS_MASK|SYSCON_GPIT|SYSCON_ERXDC_MASK);

	/* Without a phy-mode property leave the syscon untouched. */
	if ((len = OF_getproplen(sc->sc_node, "phy-mode")) <= 0)
		return;
	phy_mode = malloc(len, M_TEMP, M_WAITOK);
	OF_getprop(sc->sc_node, "phy-mode", phy_mode, len);
	/* "rgmii" must be tested before "rmii" prefixes would collide. */
	if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
		syscon |= SYSCON_GPIT | SYSCON_GTCS_INT_GMII;
	else if (!strncmp(phy_mode, "rmii", strlen("rmii")))
		syscon |= SYSCON_GPIT | SYSCON_GTCS_EXT_GMII;
	free(phy_mode, M_TEMP, len);

	/* Delay is given in picoseconds; hardware steps are 100ps. */
	rx_delay = OF_getpropint(sc->sc_node, "allwinner,rx-delay-ps", 0);
	syscon |= ((rx_delay / 100) << SYSCON_ERXDC_SHIFT) & SYSCON_ERXDC_MASK;

	regmap_write_4(rm, SYSCON_GMAC, syscon);
	dwxe_reset(sc);
}
550 
551 uint32_t
552 dwxe_read(struct dwxe_softc *sc, bus_addr_t addr)
553 {
554 	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, addr);
555 }
556 
557 void
558 dwxe_write(struct dwxe_softc *sc, bus_addr_t addr, uint32_t data)
559 {
560 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, addr, data);
561 }
562 
563 void
564 dwxe_lladdr_read(struct dwxe_softc *sc, uint8_t *lladdr)
565 {
566 	uint32_t machi, maclo;
567 
568 	machi = dwxe_read(sc, DWXE_MACADDR_HI);
569 	maclo = dwxe_read(sc, DWXE_MACADDR_LO);
570 
571 	lladdr[0] = (maclo >> 0) & 0xff;
572 	lladdr[1] = (maclo >> 8) & 0xff;
573 	lladdr[2] = (maclo >> 16) & 0xff;
574 	lladdr[3] = (maclo >> 24) & 0xff;
575 	lladdr[4] = (machi >> 0) & 0xff;
576 	lladdr[5] = (machi >> 8) & 0xff;
577 }
578 
579 void
580 dwxe_lladdr_write(struct dwxe_softc *sc)
581 {
582 	dwxe_write(sc, DWXE_MACADDR_HI,
583 	    sc->sc_lladdr[5] << 8 | sc->sc_lladdr[4] << 0);
584 	dwxe_write(sc, DWXE_MACADDR_LO,
585 	    sc->sc_lladdr[3] << 24 | sc->sc_lladdr[2] << 16 |
586 	    sc->sc_lladdr[1] << 8 | sc->sc_lladdr[0] << 0);
587 }
588 
/*
 * Transmit start routine (MP-safe ifq variant): drain the send queue
 * into the Tx descriptor ring and kick the DMA engine.
 */
void
dwxe_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct dwxe_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int error, idx, left, used;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	/* Number of free descriptors between prod and cons (ring wrap). */
	idx = sc->sc_tx_prod;
	left = sc->sc_tx_cons;
	if (left <= idx)
		left += DWXE_NTXDESC;
	left -= idx;
	used = 0;

	for (;;) {
		/* Stop while a worst-case packet still fits. */
		if (used + DWXE_NTXSEGS + 1 > left) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		error = dwxe_encap(sc, m, &idx, &used);
		if (error == EFBIG) {
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	/* Only touch the hardware if we actually queued something. */
	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;

		dwxe_write(sc, DWXE_TX_CTL1, dwxe_read(sc,
		     DWXE_TX_CTL1) | DWXE_TX_CTL1_TX_DMA_START);
	}
}
646 
/*
 * Interface ioctl handler: bring-up/down, media, Rx ring info;
 * everything else is delegated to ether_ioctl().  ENETRESET from
 * any path reprograms the Rx filter without a full restart.
 */
int
dwxe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct dwxe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				dwxe_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dwxe_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	/* ENETRESET: just reload the multicast/promisc filter. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			dwxe_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
696 
/*
 * Transmit watchdog (ifp->if_timer expired with Tx pending).
 * Currently only logs; no recovery/reset is attempted.
 */
void
dwxe_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}
702 
703 int
704 dwxe_media_change(struct ifnet *ifp)
705 {
706 	struct dwxe_softc *sc = ifp->if_softc;
707 
708 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
709 		mii_mediachg(&sc->sc_mii);
710 
711 	return (0);
712 }
713 
714 void
715 dwxe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
716 {
717 	struct dwxe_softc *sc = ifp->if_softc;
718 
719 	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
720 		mii_pollstat(&sc->sc_mii);
721 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
722 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
723 	}
724 }
725 
/*
 * Read a PHY register via the built-in MDIO controller.
 * Polls the busy bit for up to 1000 * 10us (~10ms); returns the
 * register value, or 0 on timeout.
 */
int
dwxe_mii_readreg(struct device *self, int phy, int reg)
{
	struct dwxe_softc *sc = (void *)self;
	int n;

	dwxe_write(sc, DWXE_MDIO_CMD,
	    sc->sc_clk << DWXE_MDIO_CMD_MDC_DIV_RATIO_M_SHIFT |
	    phy << DWXE_MDIO_CMD_PHY_ADDR_SHIFT |
	    reg << DWXE_MDIO_CMD_PHY_REG_SHIFT |
	    DWXE_MDIO_CMD_MII_BUSY);
	for (n = 0; n < 1000; n++) {
		if ((dwxe_read(sc, DWXE_MDIO_CMD) &
		    DWXE_MDIO_CMD_MII_BUSY) == 0)
			return dwxe_read(sc, DWXE_MDIO_DATA);
		delay(10);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}
747 
/*
 * Write a PHY register via the built-in MDIO controller.
 * Data must be loaded before the command; polls the busy bit for
 * up to ~10ms and logs on timeout.
 */
void
dwxe_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct dwxe_softc *sc = (void *)self;
	int n;

	dwxe_write(sc, DWXE_MDIO_DATA, val);
	dwxe_write(sc, DWXE_MDIO_CMD,
	    sc->sc_clk << DWXE_MDIO_CMD_MDC_DIV_RATIO_M_SHIFT |
	    phy << DWXE_MDIO_CMD_PHY_ADDR_SHIFT |
	    reg << DWXE_MDIO_CMD_PHY_REG_SHIFT |
	    DWXE_MDIO_CMD_MII_WRITE |
	    DWXE_MDIO_CMD_MII_BUSY);
	for (n = 0; n < 1000; n++) {
		if ((dwxe_read(sc, DWXE_MDIO_CMD) &
		    DWXE_MDIO_CMD_MII_BUSY) == 0)
			return;
		delay(10);
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}
770 
771 void
772 dwxe_mii_statchg(struct device *self)
773 {
774 	struct dwxe_softc *sc = (void *)self;
775 	uint32_t basicctrl;
776 
777 	basicctrl = dwxe_read(sc, DWXE_BASIC_CTL0);
778 	basicctrl &= ~DWXE_BASIC_CTL0_SPEED_MASK;
779 
780 	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
781 	case IFM_1000_SX:
782 	case IFM_1000_LX:
783 	case IFM_1000_CX:
784 	case IFM_1000_T:
785 		basicctrl |= DWXE_BASIC_CTL0_SPEED_1000;
786 		sc->sc_link = 1;
787 		break;
788 	case IFM_100_TX:
789 		basicctrl |= DWXE_BASIC_CTL0_SPEED_100;
790 		sc->sc_link = 1;
791 		break;
792 	case IFM_10_T:
793 		basicctrl |= DWXE_BASIC_CTL0_SPEED_10;
794 		sc->sc_link = 1;
795 		break;
796 	default:
797 		sc->sc_link = 0;
798 		return;
799 	}
800 
801 	if (sc->sc_link == 0)
802 		return;
803 
804 	basicctrl &= ~DWXE_BASIC_CTL0_DUPLEX;
805 	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
806 		basicctrl |= DWXE_BASIC_CTL0_DUPLEX;
807 
808 	/* XXX: RX/TX flow control? */
809 
810 	dwxe_write(sc, DWXE_BASIC_CTL0, basicctrl);
811 }
812 
813 void
814 dwxe_tick(void *arg)
815 {
816 	struct dwxe_softc *sc = arg;
817 	int s;
818 
819 	s = splnet();
820 	mii_tick(&sc->sc_mii);
821 	splx(s);
822 
823 	timeout_add_sec(&sc->sc_tick, 1);
824 }
825 
/*
 * Rx ring recovery timeout: with Rx DMA paused, tear down and
 * repopulate the entire Rx descriptor ring, then restart DMA.
 * The stop/sync/reload order here is load-bearing.
 */
void
dwxe_rxtick(void *arg)
{
	struct dwxe_softc *sc = arg;
	uint32_t ctl;
	int s;

	s = splnet();

	/* Pause Rx DMA while we rebuild the ring. */
	ctl = dwxe_read(sc, DWXE_RX_CTL1);
	dwxe_write(sc, DWXE_RX_CTL1, ctl & ~DWXE_RX_CTL1_RX_DMA_EN);

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_rxring),
	    0, DWXE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	/* Detach the list from the hardware while refilling. */
	dwxe_write(sc, DWXE_RX_DESC_LIST, 0);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwxe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_rxring),
	    0, DWXE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Reload the list head and restore the previous DMA state. */
	dwxe_write(sc, DWXE_RX_DESC_LIST, DWXE_DMA_DVA(sc->sc_rxring));
	dwxe_write(sc, DWXE_RX_CTL1, ctl);

	splx(s);
}
856 
857 int
858 dwxe_intr(void *arg)
859 {
860 	struct dwxe_softc *sc = arg;
861 	uint32_t reg;
862 
863 	reg = dwxe_read(sc, DWXE_INT_STA);
864 	dwxe_write(sc, DWXE_INT_STA, reg);
865 
866 	if (reg & DWXE_INT_STA_RX_INT)
867 		dwxe_rx_proc(sc);
868 
869 	if (reg & DWXE_INT_STA_TX_INT ||
870 	    reg & DWXE_INT_STA_TX_BUF_UA_INT)
871 		dwxe_tx_proc(sc);
872 
873 	return (1);
874 }
875 
/*
 * Tx completion: walk the ring from the consumer index, reclaim
 * descriptors the hardware has released (DESC_CTL clear), free the
 * attached mbufs and restart the send queue if it was throttled.
 */
void
dwxe_tx_proc(struct dwxe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwxe_desc *txd;
	struct dwxe_buf *txb;
	int idx, txfree;

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_txring), 0,
	    DWXE_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	txfree = 0;
	while (sc->sc_tx_cons != sc->sc_tx_prod) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < DWXE_NTXDESC);

		/* Stop at the first descriptor still owned by hardware. */
		txd = &sc->sc_txdesc[idx];
		if (txd->sd_status & DWXE_TX_DESC_CTL)
			break;

		txb = &sc->sc_txbuf[idx];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		txfree++;

		/* Advance the consumer index with wrap-around. */
		if (sc->sc_tx_cons == (DWXE_NTXDESC - 1))
			sc->sc_tx_cons = 0;
		else
			sc->sc_tx_cons++;

		txd->sd_status = 0;
	}

	/* Ring drained: cancel the transmit watchdog. */
	if (sc->sc_tx_cons == sc->sc_tx_prod)
		ifp->if_timer = 0;

	if (txfree) {
		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}
}
925 
/*
 * Rx completion: harvest completed descriptors, hand the packets to
 * the stack as an mbuf list and refill the ring.  At most the number
 * of buffers currently accounted in-use by if_rxr is consumed.
 */
void
dwxe_rx_proc(struct dwxe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwxe_desc *rxd;
	struct dwxe_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int idx, len, cnt, put;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_rxring), 0,
	    DWXE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	cnt = if_rxr_inuse(&sc->sc_rx_ring);
	put = 0;
	while (put < cnt) {
		idx = sc->sc_rx_cons;
		KASSERT(idx < DWXE_NRXDESC);

		/* Stop at the first descriptor still owned by hardware. */
		rxd = &sc->sc_rxdesc[idx];
		if (rxd->sd_status & DWXE_RX_DESC_CTL)
			break;

		len = (rxd->sd_status >> DWXE_RX_FRM_LEN_SHIFT)
		    & DWXE_RX_FRM_LEN_MASK;
		rxb = &sc->sc_rxbuf[idx];
		KASSERT(rxb->tb_m);

		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
		    len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);

		/* Strip off CRC. */
		len -= ETHER_CRC_LEN;
		KASSERT(len > 0);

		/* Detach the mbuf from the slot; the stack owns it now. */
		m = rxb->tb_m;
		rxb->tb_m = NULL;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		/* Advance the consumer index with wrap-around. */
		put++;
		if (sc->sc_rx_cons == (DWXE_NRXDESC - 1))
			sc->sc_rx_cons = 0;
		else
			sc->sc_rx_cons++;
	}

	if_rxr_put(&sc->sc_rx_ring, put);
	/* Back off ring allocation if the stack reports a livelock. */
	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	dwxe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_rxring), 0,
	    DWXE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
989 
/*
 * Bring the interface up: allocate and chain the Tx and Rx
 * descriptor rings, program the MAC address and Rx filter, enable
 * interrupts and start the DMA engines plus transmitter/receiver.
 */
void
dwxe_up(struct dwxe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwxe_buf *txb, *rxb;
	int i;

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = dwxe_dmamem_alloc(sc,
	    DWXE_NTXDESC * sizeof(struct dwxe_desc), 8);
	sc->sc_txdesc = DWXE_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct dwxe_buf) * DWXE_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < DWXE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, DWXE_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;

		/* Link descriptors into a circular list. */
		sc->sc_txdesc[i].sd_next =
		    DWXE_DMA_DVA(sc->sc_txring) +
		    ((i+1) % DWXE_NTXDESC) * sizeof(struct dwxe_desc);
	}

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_txring),
	    0, DWXE_DMA_LEN(sc->sc_txring), BUS_DMASYNC_PREWRITE);

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	dwxe_write(sc, DWXE_TX_DESC_LIST, DWXE_DMA_DVA(sc->sc_txring));

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = dwxe_dmamem_alloc(sc,
	    DWXE_NRXDESC * sizeof(struct dwxe_desc), 8);
	sc->sc_rxdesc = DWXE_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct dwxe_buf) * DWXE_NRXDESC,
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < DWXE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;

		/* Link descriptors into a circular list. */
		sc->sc_rxdesc[i].sd_next =
		    DWXE_DMA_DVA(sc->sc_rxring) +
		    ((i+1) % DWXE_NRXDESC) * sizeof(struct dwxe_desc);
	}

	if_rxr_init(&sc->sc_rx_ring, 2, DWXE_NRXDESC);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwxe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_rxring),
	    0, DWXE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwxe_write(sc, DWXE_RX_DESC_LIST, DWXE_DMA_DVA(sc->sc_rxring));

	dwxe_lladdr_write(sc);

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	dwxe_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	dwxe_write(sc, DWXE_INT_EN, DWXE_INT_EN_RX_INT |
	    DWXE_INT_EN_TX_INT | DWXE_INT_EN_TX_BUF_UA_INT);

	/* Start the DMA engines before the MAC Tx/Rx paths. */
	dwxe_write(sc, DWXE_TX_CTL1, dwxe_read(sc, DWXE_TX_CTL1) |
	    DWXE_TX_CTL1_TX_MD | DWXE_TX_CTL1_TX_NEXT_FRM |
	    DWXE_TX_CTL1_TX_DMA_EN);
	dwxe_write(sc, DWXE_RX_CTL1, dwxe_read(sc, DWXE_RX_CTL1) |
	    DWXE_RX_CTL1_RX_MD | DWXE_RX_CTL1_RX_DMA_EN);

	dwxe_write(sc, DWXE_TX_CTL0, dwxe_read(sc, DWXE_TX_CTL0) |
	    DWXE_TX_CTL0_TX_TRANSMITTER_EN);
	dwxe_write(sc, DWXE_RX_CTL0, dwxe_read(sc, DWXE_RX_CTL0) |
	    DWXE_RX_CTL0_RX_RECEIVER_EN | DWXE_RX_CTL0_RX_DO_CRC);

	timeout_add_sec(&sc->sc_tick, 1);
}
1082 
/*
 * Tear down a running interface: cancel the tick and Rx-refill
 * timeouts, clear IFF_RUNNING, halt the MAC and the DMA engines,
 * mask interrupts, wait out any interrupt handler or transmit still
 * in flight, then free the Tx/Rx mbufs, DMA maps and descriptor
 * rings that dwxe_up() allocated.
 */
void
dwxe_down(struct dwxe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwxe_buf *txb, *rxb;
	uint32_t dmactrl;
	int i;

	timeout_del(&sc->sc_rxto);
	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	dwxe_stop_dma(sc);

	/* Disable the transmitter and receiver. */
	dwxe_write(sc, DWXE_TX_CTL0, dwxe_read(sc,
	    DWXE_TX_CTL0) & ~DWXE_TX_CTL0_TX_TRANSMITTER_EN);

	dwxe_write(sc, DWXE_RX_CTL0, dwxe_read(sc,
	    DWXE_RX_CTL0) & ~DWXE_RX_CTL0_RX_RECEIVER_EN);

	/* Stop the Tx and Rx DMA engines. */
	dmactrl = dwxe_read(sc, DWXE_TX_CTL1);
	dmactrl &= ~DWXE_TX_CTL1_TX_DMA_EN;
	dwxe_write(sc, DWXE_TX_CTL1, dmactrl);

	dmactrl = dwxe_read(sc, DWXE_RX_CTL1);
	dmactrl &= ~DWXE_RX_CTL1_RX_DMA_EN;
	dwxe_write(sc, DWXE_RX_CTL1, dmactrl);

	/* Mask all interrupts. */
	dwxe_write(sc, DWXE_INT_EN, 0);

	/* Wait for a pending interrupt handler and queued transmits. */
	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	/* Free any mbufs still loaded and destroy the Tx maps. */
	for (i = 0; i < DWXE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	dwxe_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	/* Same for the Rx side. */
	for (i = 0; i < DWXE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	dwxe_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);
}
1147 
/*
 * Reverse the bit order of a 32-bit word (bit 0 <-> bit 31, etc.).
 * Classic divide-and-conquer swap of progressively larger groups;
 * see http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	static const uint32_t mask[] =
	    { 0x55555555, 0x33333333, 0x0f0f0f0f, 0x00ff00ff };
	unsigned int shift;
	int i;

	/* Swap adjacent bits, then pairs, nibbles and bytes. */
	for (i = 0, shift = 1; i < 4; i++, shift <<= 1)
		x = ((x >> shift) & mask[i]) | ((x & mask[i]) << shift);

	/* Finally swap the two 16-bit halves. */
	return (x >> 16) | (x << 16);
}
1159 
/*
 * Program the receive frame filter.  In promiscuous mode (or when
 * multicast address ranges are in use) accept everything; otherwise
 * build a 64-bit multicast hash filter from the CRC of each group
 * address on the list.
 */
void
dwxe_iff(struct dwxe_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hash[2], hashbit, hashreg;
	uint32_t reg;

	reg = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;
	bzero(hash, sizeof(hash));
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		reg |= DWXE_RX_FRM_FLT_RX_ALL_MULTICAST;
		if (ifp->if_flags & IFF_PROMISC)
			reg |= DWXE_RX_FRM_FLT_DIS_ADDR_FILTER;
	} else {
		reg |= DWXE_RX_FRM_FLT_HASH_MULTICAST;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/*
			 * Derive a 6-bit hash index from the little-endian
			 * CRC of the address: mask, complement, bit-reverse
			 * and keep the top six bits.  The upper bit selects
			 * the hash word, the rest the bit within it.
			 */
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & 0x7f;

			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	dwxe_lladdr_write(sc);

	/*
	 * NOTE(review): HASH0 is written with the high word and HASH1
	 * with the low word — presumably the hardware register layout;
	 * confirm against the H3/A64 EMAC documentation.
	 */
	dwxe_write(sc, DWXE_RX_HASH0, hash[1]);
	dwxe_write(sc, DWXE_RX_HASH1, hash[0]);

	dwxe_write(sc, DWXE_RX_FRM_FLT, reg);
}
1202 
/*
 * Enqueue an mbuf chain for transmission starting at Tx ring slot
 * *idx.  The chain is DMA-loaded (defragmenting once if it has too
 * many segments) and one descriptor is filled per DMA segment.
 * Ownership of the FIRST descriptor is handed to the hardware only
 * after all following descriptors have been set up, so the engine
 * never sees a partially built chain.  On success *idx is advanced
 * to the next free slot and *used is incremented by the number of
 * descriptors consumed; returns 0, or EFBIG if the mbuf cannot be
 * mapped even after defragmentation.
 */
int
dwxe_encap(struct dwxe_softc *sc, struct mbuf *m, int *idx, int *used)
{
	struct dwxe_desc *txd, *txd_start;
	bus_dmamap_t map;
	int cur, frag, i;

	cur = frag = *idx;
	map = sc->sc_txbuf[cur].tb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
		if (m_defrag(m, M_DONTWAIT))
			return (EFBIG);
		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
			return (EFBIG);
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	txd = txd_start = &sc->sc_txdesc[frag];
	for (i = 0; i < map->dm_nsegs; i++) {
		txd->sd_addr = map->dm_segs[i].ds_addr;
		txd->sd_len = map->dm_segs[i].ds_len;
		if (i == 0)
			txd->sd_len |= DWXE_TX_FIR_DESC;
		if (i == (map->dm_nsegs - 1))
			txd->sd_len |= DWXE_TX_LAST_DESC | DWXE_TX_INT_CTL;
		/* Give every descriptor except the first to the hardware. */
		if (i != 0)
			txd->sd_status = DWXE_TX_DESC_CTL;

		bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_txring),
		    frag * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

		cur = frag;
		if (frag == (DWXE_NTXDESC - 1)) {
			txd = &sc->sc_txdesc[0];
			frag = 0;
		} else {
			txd++;
			frag++;
		}
		/* The ring must never catch up with the consumer index. */
		KASSERT(frag != sc->sc_tx_cons);
	}

	/* Now hand the first descriptor over, making the chain live. */
	txd_start->sd_status = DWXE_TX_DESC_CTL;
	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_txring),
	    *idx * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

	/*
	 * The loaded map and the mbuf are recorded in the slot of the
	 * LAST descriptor (cur), where completion is detected; swap
	 * maps so slot *idx keeps a spare map for a later packet.
	 */
	KASSERT(sc->sc_txbuf[cur].tb_m == NULL);
	sc->sc_txbuf[*idx].tb_map = sc->sc_txbuf[cur].tb_map;
	sc->sc_txbuf[cur].tb_map = map;
	sc->sc_txbuf[cur].tb_m = m;

	*idx = frag;
	*used += map->dm_nsegs;

	return (0);
}
1263 
1264 void
1265 dwxe_reset(struct dwxe_softc *sc)
1266 {
1267 	int n;
1268 
1269 	dwxe_stop_dma(sc);
1270 
1271 	dwxe_write(sc, DWXE_BASIC_CTL1, DWXE_BASIC_CTL1_SOFT_RST);
1272 
1273 	for (n = 0; n < 1000; n++) {
1274 		if ((dwxe_read(sc, DWXE_BASIC_CTL1) &
1275 		    DWXE_BASIC_CTL1_SOFT_RST) == 0)
1276 			return;
1277 		delay(10);
1278 	}
1279 
1280 	printf("%s: reset timeout\n", sc->sc_dev.dv_xname);
1281 }
1282 
1283 void
1284 dwxe_stop_dma(struct dwxe_softc *sc)
1285 {
1286 	uint32_t dmactrl;
1287 
1288 	/* Stop DMA. */
1289 	dmactrl = dwxe_read(sc, DWXE_TX_CTL1);
1290 	dmactrl &= ~DWXE_TX_CTL1_TX_DMA_EN;
1291 	dmactrl |= DWXE_TX_CTL1_TX_FIFO_FLUSH;
1292 	dwxe_write(sc, DWXE_TX_CTL1, dmactrl);
1293 }
1294 
1295 struct dwxe_dmamem *
1296 dwxe_dmamem_alloc(struct dwxe_softc *sc, bus_size_t size, bus_size_t align)
1297 {
1298 	struct dwxe_dmamem *tdm;
1299 	int nsegs;
1300 
1301 	tdm = malloc(sizeof(*tdm), M_DEVBUF, M_WAITOK | M_ZERO);
1302 	tdm->tdm_size = size;
1303 
1304 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1305 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
1306 		goto tdmfree;
1307 
1308 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &tdm->tdm_seg, 1,
1309 	    &nsegs, BUS_DMA_WAITOK) != 0)
1310 		goto destroy;
1311 
1312 	if (bus_dmamem_map(sc->sc_dmat, &tdm->tdm_seg, nsegs, size,
1313 	    &tdm->tdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
1314 		goto free;
1315 
1316 	if (bus_dmamap_load(sc->sc_dmat, tdm->tdm_map, tdm->tdm_kva, size,
1317 	    NULL, BUS_DMA_WAITOK) != 0)
1318 		goto unmap;
1319 
1320 	bzero(tdm->tdm_kva, size);
1321 
1322 	return (tdm);
1323 
1324 unmap:
1325 	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, size);
1326 free:
1327 	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
1328 destroy:
1329 	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
1330 tdmfree:
1331 	free(tdm, M_DEVBUF, 0);
1332 
1333 	return (NULL);
1334 }
1335 
/*
 * Release DMA memory obtained from dwxe_dmamem_alloc(): unmap the
 * kernel virtual mapping, free the underlying segment, destroy the
 * DMA map and free the bookkeeping structure itself.
 */
void
dwxe_dmamem_free(struct dwxe_softc *sc, struct dwxe_dmamem *tdm)
{
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, tdm->tdm_size);
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
	free(tdm, M_DEVBUF, 0);
}
1344 
1345 struct mbuf *
1346 dwxe_alloc_mbuf(struct dwxe_softc *sc, bus_dmamap_t map)
1347 {
1348 	struct mbuf *m = NULL;
1349 
1350 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1351 	if (!m)
1352 		return (NULL);
1353 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1354 	m_adj(m, ETHER_ALIGN);
1355 
1356 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1357 		printf("%s: could not load mbuf DMA map", DEVNAME(sc));
1358 		m_freem(m);
1359 		return (NULL);
1360 	}
1361 
1362 	bus_dmamap_sync(sc->sc_dmat, map, 0,
1363 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1364 
1365 	return (m);
1366 }
1367 
/*
 * Refill empty Rx ring slots with fresh cluster mbufs and hand the
 * corresponding descriptors back to the hardware.  If no buffers
 * could be put on the ring at all (allocation failure), arm the
 * refill timeout to try again shortly.
 */
void
dwxe_fill_rx_ring(struct dwxe_softc *sc)
{
	struct dwxe_desc *rxd;
	struct dwxe_buf *rxb;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, DWXE_NRXDESC);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
		rxb->tb_m = dwxe_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
		/*
		 * NOTE(review): the buffer length is programmed as
		 * ds_len - 1 — presumably the hardware field is
		 * inclusive; confirm against the EMAC documentation.
		 */
		rxd->sd_len = rxb->tb_map->dm_segs[0].ds_len - 1;
		rxd->sd_addr = rxb->tb_map->dm_segs[0].ds_addr;
		rxd->sd_status = DWXE_RX_DESC_CTL;

		if (sc->sc_rx_prod == (DWXE_NRXDESC - 1))
			sc->sc_rx_prod = 0;
		else
			sc->sc_rx_prod++;
	}
	/* Return the slots we could not fill to the rxr accounting. */
	if_rxr_put(&sc->sc_rx_ring, slots);

	/* Ring completely empty: retry from a timeout. */
	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		timeout_add(&sc->sc_rxto, 1);
}
1397