xref: /openbsd-src/sys/dev/fdt/if_dwxe.c (revision 17712d2fc01b502e686cb0ed8ba202ddba3da8e9)
1 /*	$OpenBSD: if_dwxe.c,v 1.24 2024/02/27 10:47:20 kettenis Exp $	*/
2 /*
3  * Copyright (c) 2008 Mark Kettenis
4  * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for the ethernet controller on the Allwinner H3/A64 SoCs.
21  */
22 
23 #include "bpfilter.h"
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/device.h>
28 #include <sys/kernel.h>
29 #include <sys/malloc.h>
30 #include <sys/mbuf.h>
31 #include <sys/queue.h>
32 #include <sys/socket.h>
33 #include <sys/sockio.h>
34 #include <sys/timeout.h>
35 
36 #include <machine/bus.h>
37 #include <machine/fdt.h>
38 
39 #include <net/if.h>
40 #include <net/if_media.h>
41 
42 #include <dev/ofw/openfirm.h>
43 #include <dev/ofw/ofw_clock.h>
44 #include <dev/ofw/ofw_misc.h>
45 #include <dev/ofw/ofw_pinctrl.h>
46 #include <dev/ofw/ofw_regulator.h>
47 #include <dev/ofw/fdt.h>
48 
49 #include <dev/mii/mii.h>
50 #include <dev/mii/miivar.h>
51 
52 #if NBPFILTER > 0
53 #include <net/bpf.h>
54 #endif
55 
56 #include <netinet/in.h>
57 #include <netinet/if_ether.h>
58 
59 /*
60  * DWXE registers.
61  */
62 
63 #define DWXE_BASIC_CTL0		0x00
64 #define  DWXE_BASIC_CTL0_DUPLEX		(1 << 0)
65 #define  DWXE_BASIC_CTL0_LOOPBACK		(1 << 1)
66 #define  DWXE_BASIC_CTL0_SPEED_1000		(0 << 2)
67 #define  DWXE_BASIC_CTL0_SPEED_10		(2 << 2)
68 #define  DWXE_BASIC_CTL0_SPEED_100		(3 << 2)
69 #define  DWXE_BASIC_CTL0_SPEED_MASK		(3 << 2)
70 #define DWXE_BASIC_CTL1		0x04
71 #define  DWXE_BASIC_CTL1_SOFT_RST		(1 << 0)
72 #define  DWXE_BASIC_CTL1_RX_TX_PRI		(1 << 1)
73 #define  DWXE_BASIC_CTL1_BURST_LEN_MASK	(0x3f << 24)
74 #define  DWXE_BASIC_CTL1_BURST_LEN(x)		((x) << 24)
75 #define DWXE_INT_STA			0x08
76 #define  DWXE_INT_STA_TX_INT			(1 << 0)
77 #define  DWXE_INT_STA_TX_DMA_STOP_INT		(1 << 1)
78 #define  DWXE_INT_STA_TX_BUF_UA_INT		(1 << 2)
79 #define  DWXE_INT_STA_TX_TIMEOUT_INT		(1 << 3)
80 #define  DWXE_INT_STA_TX_UNDERFLOW_INT	(1 << 4)
81 #define  DWXE_INT_STA_TX_EARLY_INT		(1 << 5)
82 #define  DWXE_INT_STA_RX_INT			(1 << 8)
83 #define  DWXE_INT_STA_RX_BUF_UA_INT		(1 << 9)
84 #define  DWXE_INT_STA_RX_DMA_STOP_INT		(1 << 10)
85 #define  DWXE_INT_STA_RX_TIMEOUT_INT		(1 << 11)
86 #define  DWXE_INT_STA_RX_OVERFLOW_INT		(1 << 12)
87 #define  DWXE_INT_STA_RX_EARLY_INT		(1 << 13)
88 #define  DWXE_INT_STA_RGMII_STA_INT		(1 << 16)
89 #define DWXE_INT_EN			0x0C
90 #define  DWXE_INT_EN_TX_INT			(1 << 0)
91 #define  DWXE_INT_EN_TX_DMA_STOP_INT		(1 << 1)
92 #define  DWXE_INT_EN_TX_BUF_UA_INT		(1 << 2)
93 #define  DWXE_INT_EN_TX_TIMEOUT_INT		(1 << 3)
94 #define  DWXE_INT_EN_TX_UNDERFLOW_INT		(1 << 4)
95 #define  DWXE_INT_EN_TX_EARLY_INT		(1 << 5)
96 #define  DWXE_INT_EN_RX_INT			(1 << 8)
97 #define  DWXE_INT_EN_RX_BUF_UA_INT		(1 << 9)
98 #define  DWXE_INT_EN_RX_DMA_STOP_INT		(1 << 10)
99 #define  DWXE_INT_EN_RX_TIMEOUT_INT		(1 << 11)
100 #define  DWXE_INT_EN_RX_OVERFLOW_INT		(1 << 12)
101 #define  DWXE_INT_EN_RX_EARLY_INT		(1 << 13)
102 #define  DWXE_INT_EN_RGMII_EN_INT		(1 << 16)
103 #define DWXE_TX_CTL0			0x10
104 #define  DWXE_TX_CTL0_TX_TRANSMITTER_EN	(1U << 31)
105 #define DWXE_TX_CTL1			0x14
106 #define  DWXE_TX_CTL1_TX_FIFO_FLUSH		(1 << 0)
107 #define  DWXE_TX_CTL1_TX_MD			(1 << 1)
108 #define  DWXE_TX_CTL1_TX_NEXT_FRM		(1 << 2)
109 #define  DWXE_TX_CTL1_TX_TH_MASK		(0x3 << 8)
110 #define  DWXE_TX_CTL1_TX_TH_64		0
111 #define  DWXE_TX_CTL1_TX_TH_128		(0x1 << 8)
112 #define  DWXE_TX_CTL1_TX_TH_192		(0x2 << 8)
113 #define  DWXE_TX_CTL1_TX_TH_256		(0x3 << 8)
114 #define  DWXE_TX_CTL1_TX_DMA_EN		(1 << 30)
115 #define  DWXE_TX_CTL1_TX_DMA_START		(1U << 31)
116 #define DWXE_TX_FLOW_CTL		0x1C
117 #define  DWXE_TX_FLOW_CTL_EN			(1 << 0)
118 #define DWXE_TX_DESC_LIST		0x20
119 #define DWXE_RX_CTL0			0x24
120 #define  DWXE_RX_CTL0_RX_FLOW_CTL_EN		(1 << 16)
121 #define  DWXE_RX_CTL0_RX_DO_CRC		(1 << 27)
122 #define  DWXE_RX_CTL0_RX_RECEIVER_EN		(1U << 31)
123 #define DWXE_RX_CTL1			0x28
124 #define  DWXE_RX_CTL1_RX_MD			(1 << 1)
125 #define  DWXE_RX_CTL1_RX_TH_MASK		(0x3 << 4)
126 #define  DWXE_RX_CTL1_RX_TH_32		(0x0 << 4)
127 #define  DWXE_RX_CTL1_RX_TH_64		(0x1 << 4)
128 #define  DWXE_RX_CTL1_RX_TH_96		(0x2 << 4)
129 #define  DWXE_RX_CTL1_RX_TH_128		(0x3 << 4)
130 #define  DWXE_RX_CTL1_RX_DMA_EN		(1 << 30)
131 #define  DWXE_RX_CTL1_RX_DMA_START		(1U << 31)
132 #define DWXE_RX_DESC_LIST		0x34
133 #define DWXE_RX_FRM_FLT		0x38
134 #define DWXE_RX_FRM_FLT_RX_ALL		(1 << 0)
135 #define DWXE_RX_FRM_FLT_HASH_UNICAST		(1 << 8)
136 #define DWXE_RX_FRM_FLT_HASH_MULTICAST	(1 << 9)
137 #define DWXE_RX_FRM_FLT_CTL			(1 << 13)
138 #define DWXE_RX_FRM_FLT_RX_ALL_MULTICAST	(1 << 16)
139 #define DWXE_RX_FRM_FLT_DIS_BROADCAST		(1 << 17)
140 #define DWXE_RX_FRM_FLT_DIS_ADDR_FILTER	(1U << 31)
141 #define DWXE_RX_HASH0			0x40
142 #define DWXE_RX_HASH1			0x44
143 #define DWXE_MDIO_CMD			0x48
144 #define  DWXE_MDIO_CMD_MII_BUSY		(1 << 0)
145 #define  DWXE_MDIO_CMD_MII_WRITE		(1 << 1)
146 #define  DWXE_MDIO_CMD_PHY_REG_SHIFT		4
147 #define  DWXE_MDIO_CMD_PHY_ADDR_SHIFT		12
148 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_SHIFT	20
149 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_MASK	0x7
150 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_16	0
151 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_32	1
152 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_64	2
153 #define  DWXE_MDIO_CMD_MDC_DIV_RATIO_M_128	3
154 #define DWXE_MDIO_DATA		0x4C
155 #define DWXE_MACADDR_HI		0x50
156 #define DWXE_MACADDR_LO		0x54
157 #define DWXE_TX_DMA_STA		0xB0
158 #define DWXE_TX_CUR_DESC		0xB4
159 #define DWXE_TX_CUR_BUF		0xB8
160 #define DWXE_RX_DMA_STA		0xC0
161 #define DWXE_RX_CUR_DESC		0xC4
162 #define DWXE_RX_CUR_BUF		0xC8
163 
164 /*
165  * DWXE descriptors.
166  */
167 
/*
 * Hardware DMA descriptor, shared with the EMAC.  The layout is fixed
 * by the hardware; descriptors are chained into a ring via sd_next.
 */
struct dwxe_desc {
	uint32_t sd_status;	/* Tx/Rx status bits, incl. the CTL (ownership) bit */
	uint32_t sd_len;	/* buffer size plus per-direction control bits */
	uint32_t sd_addr;	/* DMA address of the data buffer */
	uint32_t sd_next;	/* DMA address of the next descriptor */
};
174 
175 /* Tx status bits. */
176 #define DWXE_TX_DEFER			(1 << 0)
177 #define DWXE_TX_UNDERFLOW_ERR		(1 << 1)
178 #define DWXE_TX_DEFER_ERR		(1 << 2)
179 #define DWXE_TX_COL_CNT_MASK		(0xf << 3)
180 #define DWXE_TX_COL_CNT_SHIFT		3
181 #define DWXE_TX_COL_ERR_1		(1 << 8)
182 #define DWXE_TX_COL_ERR_0		(1 << 9)
183 #define DWXE_TX_CRS_ERR		(1 << 10)
184 #define DWXE_TX_PAYLOAD_ERR		(1 << 12)
185 #define DWXE_TX_LENGTH_ERR		(1 << 14)
186 #define DWXE_TX_HEADER_ERR		(1 << 16)
187 #define DWXE_TX_DESC_CTL		(1U << 31)
188 
189 /* Rx status bits */
190 #define DWXE_RX_PAYLOAD_ERR		(1 << 0)
191 #define DWXE_RX_CRC_ERR		(1 << 1)
192 #define DWXE_RX_PHY_ERR		(1 << 3)
193 #define DWXE_RX_LENGTH_ERR		(1 << 4)
194 #define DWXE_RX_FRM_TYPE		(1 << 5)
195 #define DWXE_RX_COL_ERR		(1 << 6)
196 #define DWXE_RX_HEADER_ERR		(1 << 7)
197 #define DWXE_RX_LAST_DESC		(1 << 8)
198 #define DWXE_RX_FIR_DESC		(1 << 9)
199 #define DWXE_RX_OVERFLOW_ERR		(1 << 11)
200 #define DWXE_RX_SAF_FAIL		(1 << 13)
201 #define DWXE_RX_NO_ENOUGH_BUF_ERR	(1 << 14)
202 #define DWXE_RX_FRM_LEN_MASK		0x3fff
203 #define DWXE_RX_FRM_LEN_SHIFT		16
204 #define DWXE_RX_DAF_FAIL		(1 << 30)
205 #define DWXE_RX_DESC_CTL		(1U << 31)
206 
207 /* Tx size bits */
208 #define DWXE_TX_BUF_SIZE		(0xfff << 0)
209 #define DWXE_TX_CRC_CTL		(1 << 26)
210 #define DWXE_TX_CHECKSUM_CTL_MASK	(0x3 << 27)
211 #define DWXE_TX_CHECKSUM_CTL_IP	(1 << 27)
212 #define DWXE_TX_CHECKSUM_CTL_NO_PSE	(2 << 27)
213 #define DWXE_TX_CHECKSUM_CTL_FULL	(3 << 27)
214 #define DWXE_TX_FIR_DESC		(1 << 29)
215 #define DWXE_TX_LAST_DESC		(1 << 30)
216 #define DWXE_TX_INT_CTL		(1U << 31)
217 
218 /* Rx size bits */
219 #define DWXE_RX_BUF_SIZE		(0xfff << 0)
220 #define DWXE_RX_INT_CTL		(1U << 31)
221 
222 /* EMAC syscon bits */
223 #define SYSCON_EMAC			0x30
224 #define SYSCON_ETCS_MASK		(0x3 << 0)
225 #define SYSCON_ETCS_MII			(0 << 0)
226 #define SYSCON_ETCS_EXT_GMII		(1 << 0)
227 #define SYSCON_ETCS_INT_GMII		(2 << 0)
228 #define SYSCON_EPIT			(1 << 2) /* 1: RGMII, 0: MII */
229 #define SYSCON_ERXDC_MASK		(0xf << 5)
230 #define SYSCON_ERXDC_SHIFT		5
231 #define SYSCON_ETXDC_MASK		(0x7 << 10)
232 #define SYSCON_ETXDC_SHIFT		10
233 #define SYSCON_RMII_EN			(1 << 13) /* 1: enable RMII (overrides EPIT) */
234 #define SYSCON_H3_EPHY_SELECT		(1 << 15) /* 1: internal PHY, 0: external PHY */
235 #define SYSCON_H3_EPHY_SHUTDOWN		(1 << 16) /* 1: shutdown, 0: power up */
236 #define SYSCON_H3_EPHY_LED_POL		(1 << 17) /* 1: active low, 0: active high */
237 #define SYSCON_H3_EPHY_CLK_SEL		(1 << 18) /* 1: 24MHz, 0: 25MHz */
238 #define SYSCON_H3_EPHY_ADDR_MASK	(0x1f << 20)
239 #define SYSCON_H3_EPHY_ADDR_SHIFT	20
240 
241 /* GMAC syscon bits (Allwinner R40) */
242 #define SYSCON_GMAC			0x00
243 #define SYSCON_GTCS_MASK		SYSCON_ETCS_MASK
244 #define SYSCON_GTCS_MII			SYSCON_ETCS_MII
245 #define SYSCON_GTCS_EXT_GMII		SYSCON_ETCS_EXT_GMII
246 #define SYSCON_GTCS_INT_GMII		SYSCON_ETCS_INT_GMII
247 #define SYSCON_GPIT			SYSCON_EPIT
248 #define SYSCON_GRXDC_MASK		(0x7 << 5)
249 #define SYSCON_GRXDC_SHIFT		5
250 
/* Per-descriptor software state: the DMA map and the mbuf it carries. */
struct dwxe_buf {
	bus_dmamap_t	tb_map;
	struct mbuf	*tb_m;
};
255 
256 #define DWXE_NTXDESC	256
257 #define DWXE_NTXSEGS	16
258 
259 #define DWXE_NRXDESC	256
260 
/* A single contiguous DMA allocation; used for the descriptor rings. */
struct dwxe_dmamem {
	bus_dmamap_t		tdm_map;	/* loaded DMA map */
	bus_dma_segment_t	tdm_seg;	/* backing memory segment */
	size_t			tdm_size;	/* allocation size in bytes */
	caddr_t			tdm_kva;	/* kernel virtual mapping */
};
/* Convenience accessors for map, length, device address and KVA. */
#define DWXE_DMA_MAP(_tdm)	((_tdm)->tdm_map)
#define DWXE_DMA_LEN(_tdm)	((_tdm)->tdm_size)
#define DWXE_DMA_DVA(_tdm)	((_tdm)->tdm_map->dm_segs[0].ds_addr)
#define DWXE_DMA_KVA(_tdm)	((void *)(_tdm)->tdm_kva)
271 
/* Per-device software state. */
struct dwxe_softc {
	struct device		sc_dev;
	int			sc_node;	/* FDT node of this device */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;
	void			*sc_ih;		/* interrupt handle */

	struct arpcom		sc_ac;
#define sc_lladdr	sc_ac.ac_enaddr
	struct mii_data		sc_mii;
#define sc_media	sc_mii.mii_media
	int			sc_link;	/* nonzero when link is up */
	int			sc_phyloc;	/* PHY address on the MDIO bus */

	/* Tx descriptor ring and its producer/consumer indices. */
	struct dwxe_dmamem	*sc_txring;
	struct dwxe_buf		*sc_txbuf;
	struct dwxe_desc	*sc_txdesc;
	int			sc_tx_prod;
	int			sc_tx_cons;

	/* Rx descriptor ring, fill index and rxr accounting. */
	struct dwxe_dmamem	*sc_rxring;
	struct dwxe_buf		*sc_rxbuf;
	struct dwxe_desc	*sc_rxdesc;
	int			sc_rx_prod;
	struct if_rxring	sc_rx_ring;
	int			sc_rx_cons;

	struct timeout		sc_tick;	/* periodic MII tick */
	struct timeout		sc_rxto;	/* Rx ring refill timeout */

	/* MDC divider code (DWXE_MDIO_CMD_MDC_DIV_RATIO_M_*), not a frequency. */
	uint32_t		sc_clk;
};
305 
306 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
307 
308 int	dwxe_match(struct device *, void *, void *);
309 void	dwxe_attach(struct device *, struct device *, void *);
310 int	dwxe_activate(struct device *, int);
311 void	dwxe_init(struct dwxe_softc *sc);
312 void	dwxe_phy_setup_emac(struct dwxe_softc *);
313 void	dwxe_phy_setup_gmac(struct dwxe_softc *);
314 
/* Autoconf attachment glue. */
const struct cfattach dwxe_ca = {
	sizeof(struct dwxe_softc), dwxe_match, dwxe_attach,
	NULL, dwxe_activate
};

struct cfdriver dwxe_cd = {
	NULL, "dwxe", DV_IFNET
};
323 
324 uint32_t dwxe_read(struct dwxe_softc *, bus_addr_t);
325 void	dwxe_write(struct dwxe_softc *, bus_addr_t, uint32_t);
326 
327 int	dwxe_ioctl(struct ifnet *, u_long, caddr_t);
328 void	dwxe_start(struct ifqueue *);
329 void	dwxe_watchdog(struct ifnet *);
330 
331 int	dwxe_media_change(struct ifnet *);
332 void	dwxe_media_status(struct ifnet *, struct ifmediareq *);
333 
334 int	dwxe_mii_readreg(struct device *, int, int);
335 void	dwxe_mii_writereg(struct device *, int, int, int);
336 void	dwxe_mii_statchg(struct device *);
337 
338 void	dwxe_lladdr_read(struct dwxe_softc *, uint8_t *);
339 void	dwxe_lladdr_write(struct dwxe_softc *);
340 
341 void	dwxe_tick(void *);
342 void	dwxe_rxtick(void *);
343 
344 int	dwxe_intr(void *);
345 void	dwxe_tx_proc(struct dwxe_softc *);
346 void	dwxe_rx_proc(struct dwxe_softc *);
347 
348 void	dwxe_up(struct dwxe_softc *);
349 void	dwxe_down(struct dwxe_softc *);
350 void	dwxe_iff(struct dwxe_softc *);
351 int	dwxe_encap(struct dwxe_softc *, struct mbuf *, int *, int *);
352 
353 void	dwxe_reset(struct dwxe_softc *);
354 void	dwxe_stop_dma(struct dwxe_softc *);
355 
356 struct dwxe_dmamem *
357 	dwxe_dmamem_alloc(struct dwxe_softc *, bus_size_t, bus_size_t);
358 void	dwxe_dmamem_free(struct dwxe_softc *, struct dwxe_dmamem *);
359 struct mbuf *dwxe_alloc_mbuf(struct dwxe_softc *, bus_dmamap_t);
360 void	dwxe_fill_rx_ring(struct dwxe_softc *);
361 
362 int
dwxe_match(struct device * parent,void * cfdata,void * aux)363 dwxe_match(struct device *parent, void *cfdata, void *aux)
364 {
365 	struct fdt_attach_args *faa = aux;
366 
367 	return OF_is_compatible(faa->fa_node, "allwinner,sun8i-h3-emac") ||
368 	    OF_is_compatible(faa->fa_node, "allwinner,sun8i-r40-gmac") ||
369 	    OF_is_compatible(faa->fa_node, "allwinner,sun50i-a64-emac");
370 }
371 
/*
 * Attach: map the registers, parse the FDT properties (phy-mode,
 * phy-handle, clock, MAC address), initialize the hardware and hook
 * up the network interface, media layer and interrupt handler.
 */
void
dwxe_attach(struct device *parent, struct device *self, void *aux)
{
	struct dwxe_softc *sc = (void *)self;
	struct fdt_attach_args *faa = aux;
	char phy_mode[16] = { 0 };
	struct ifnet *ifp;
	uint32_t phy;
	int mii_flags = 0;
	int node;

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}
	sc->sc_dmat = faa->fa_dmat;

	/* Translate the "phy-mode" property into RGMII delay flags. */
	OF_getprop(faa->fa_node, "phy-mode", phy_mode, sizeof(phy_mode));
	if (strcmp(phy_mode, "rgmii") == 0)
		mii_flags |= MIIF_SETDELAY;
	else if (strcmp(phy_mode, "rgmii-rxid") == 0)
		mii_flags |= MIIF_SETDELAY | MIIF_RXID;
	else if (strcmp(phy_mode, "rgmii-txid") == 0)
		mii_flags |= MIIF_SETDELAY | MIIF_TXID;
	else if (strcmp(phy_mode, "rgmii-id") == 0)
		mii_flags |= MIIF_SETDELAY | MIIF_RXID | MIIF_TXID;

	/* Lookup PHY. */
	phy = OF_getpropint(faa->fa_node, "phy-handle", 0);
	node = OF_getnodebyphandle(phy);
	if (node)
		sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
	else
		sc->sc_phyloc = MII_PHY_ANY;
	sc->sc_mii.mii_node = node;

	/*
	 * Derive the MDC divider code from the module clock frequency;
	 * sc_clk holds the divider code afterwards, not the frequency.
	 */
	sc->sc_clk = clock_get_frequency(faa->fa_node, "stmmaceth");
	if (sc->sc_clk > 160000000)
		sc->sc_clk = DWXE_MDIO_CMD_MDC_DIV_RATIO_M_128;
	else if (sc->sc_clk > 80000000)
		sc->sc_clk = DWXE_MDIO_CMD_MDC_DIV_RATIO_M_64;
	else if (sc->sc_clk > 40000000)
		sc->sc_clk = DWXE_MDIO_CMD_MDC_DIV_RATIO_M_32;
	else
		sc->sc_clk = DWXE_MDIO_CMD_MDC_DIV_RATIO_M_16;

	/* Prefer the FDT MAC address; fall back to what the MAC has. */
	if (OF_getprop(faa->fa_node, "local-mac-address",
	    &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
		dwxe_lladdr_read(sc, sc->sc_lladdr);
	printf(": address %s\n", ether_sprintf(sc->sc_lladdr));

	dwxe_init(sc);

	timeout_set(&sc->sc_tick, dwxe_tick, sc);
	timeout_set(&sc->sc_rxto, dwxe_rxtick, sc);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = dwxe_ioctl;
	ifp->if_qstart = dwxe_start;
	ifp->if_watchdog = dwxe_watchdog;
	ifq_init_maxlen(&ifp->if_snd, DWXE_NTXDESC - 1);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = dwxe_mii_readreg;
	sc->sc_mii.mii_writereg = dwxe_mii_writereg;
	sc->sc_mii.mii_statchg = dwxe_mii_statchg;

	ifmedia_init(&sc->sc_media, 0, dwxe_media_change, dwxe_media_status);

	mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
	    MII_OFFSET_ANY, MIIF_NOISOLATE | mii_flags);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found; expose a manual placeholder medium. */
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_NET | IPL_MPSAFE,
	    dwxe_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL)
		printf("%s: can't establish interrupt\n", sc->sc_dev.dv_xname);
}
467 
468 int
dwxe_activate(struct device * self,int act)469 dwxe_activate(struct device *self, int act)
470 {
471 	struct dwxe_softc *sc = (struct dwxe_softc *)self;
472 	struct ifnet *ifp = &sc->sc_ac.ac_if;
473 
474 	switch (act) {
475 	case DVACT_SUSPEND:
476 		if (ifp->if_flags & IFF_RUNNING)
477 			dwxe_down(sc);
478 		break;
479 	case DVACT_RESUME:
480 		dwxe_init(sc);
481 		if (ifp->if_flags & IFF_UP)
482 			dwxe_up(sc);
483 		break;
484 	}
485 
486 	return 0;
487 }
488 
/*
 * Bring the controller out of reset: pins, module clock, PHY power
 * and the SoC-specific syscon glue, then soft-reset the MAC.
 */
void
dwxe_init(struct dwxe_softc *sc)
{
	uint32_t phy_supply;

	pinctrl_byname(sc->sc_node, "default");

	/* Enable clock. */
	clock_enable(sc->sc_node, "stmmaceth");
	reset_deassert(sc->sc_node, "stmmaceth");
	delay(5000);

	/* Power up PHY. */
	phy_supply = OF_getpropint(sc->sc_node, "phy-supply", 0);
	if (phy_supply)
		regulator_enable(phy_supply);

	/* Do hardware specific initializations. */
	if (OF_is_compatible(sc->sc_node, "allwinner,sun8i-r40-gmac"))
		dwxe_phy_setup_gmac(sc);
	else
		dwxe_phy_setup_emac(sc);

	dwxe_reset(sc);
}
514 
/*
 * Program the H3/A64 EMAC interface glue in the system controller:
 * interface type (MII/RMII/RGMII), internal-PHY selection on the H3,
 * and the RGMII Tx/Rx clock delays from the FDT properties.
 */
void
dwxe_phy_setup_emac(struct dwxe_softc *sc)
{
	struct regmap *rm;
	uint32_t syscon;
	uint32_t tx_delay, rx_delay;
	char *phy_mode;
	int len;

	rm = regmap_byphandle(OF_getpropint(sc->sc_node, "syscon", 0));
	if (rm == NULL)
		return;

	/* Start from a clean interface-type / delay configuration. */
	syscon = regmap_read_4(rm, SYSCON_EMAC);
	syscon &= ~(SYSCON_ETCS_MASK|SYSCON_EPIT|SYSCON_RMII_EN);
	syscon &= ~(SYSCON_ETXDC_MASK | SYSCON_ERXDC_MASK);
	syscon &= ~SYSCON_H3_EPHY_SELECT;

	if ((len = OF_getproplen(sc->sc_node, "phy-mode")) <= 0)
		return;
	phy_mode = malloc(len, M_TEMP, M_WAITOK);
	OF_getprop(sc->sc_node, "phy-mode", phy_mode, len);
	/* Prefix match so "rgmii-id" etc. land in the rgmii case. */
	if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
		syscon |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
	else if (!strncmp(phy_mode, "rmii", strlen("rmii")))
		/*
		 * NOTE(review): this sets SYSCON_EPIT (documented above as
		 * RGMII) rather than SYSCON_RMII_EN — confirm against the
		 * SoC manual whether RMII should use SYSCON_RMII_EN here.
		 */
		syscon |= SYSCON_EPIT | SYSCON_ETCS_EXT_GMII;
	else if (!strncmp(phy_mode, "mii", strlen("mii")) &&
	    OF_is_compatible(sc->sc_node, "allwinner,sun8i-h3-emac")) {
		/* H3 internal PHY: power it up and select it. */
		syscon &= ~SYSCON_H3_EPHY_SHUTDOWN;
		syscon |= SYSCON_H3_EPHY_SELECT | SYSCON_H3_EPHY_CLK_SEL;
		if (OF_getproplen(sc->sc_node, "allwinner,leds-active-low") == 0)
			syscon |= SYSCON_H3_EPHY_LED_POL;
		else
			syscon &= ~SYSCON_H3_EPHY_LED_POL;
		syscon &= ~SYSCON_H3_EPHY_ADDR_MASK;
		syscon |= (sc->sc_phyloc << SYSCON_H3_EPHY_ADDR_SHIFT);
	}
	free(phy_mode, M_TEMP, len);

	/* Delay properties are in picoseconds; hardware steps are 100ps. */
	tx_delay = OF_getpropint(sc->sc_node, "allwinner,tx-delay-ps", 0);
	rx_delay = OF_getpropint(sc->sc_node, "allwinner,rx-delay-ps", 0);
	syscon |= ((tx_delay / 100) << SYSCON_ETXDC_SHIFT) & SYSCON_ETXDC_MASK;
	syscon |= ((rx_delay / 100) << SYSCON_ERXDC_SHIFT) & SYSCON_ERXDC_MASK;

	regmap_write_4(rm, SYSCON_EMAC, syscon);
}
561 
/*
 * Program the R40 GMAC interface glue in the system controller:
 * interface type and the RGMII Rx clock delay (the R40 glue has no
 * Tx delay field).
 */
void
dwxe_phy_setup_gmac(struct dwxe_softc *sc)
{
	struct regmap *rm;
	uint32_t syscon;
	uint32_t rx_delay;
	char *phy_mode;
	int len;

	rm = regmap_byphandle(OF_getpropint(sc->sc_node, "syscon", 0));
	if (rm == NULL)
		return;

	syscon = regmap_read_4(rm, SYSCON_GMAC);
	syscon &= ~(SYSCON_GTCS_MASK|SYSCON_GPIT|SYSCON_ERXDC_MASK);

	if ((len = OF_getproplen(sc->sc_node, "phy-mode")) <= 0)
		return;
	phy_mode = malloc(len, M_TEMP, M_WAITOK);
	OF_getprop(sc->sc_node, "phy-mode", phy_mode, len);
	/* Prefix match so "rgmii-id" etc. land in the rgmii case. */
	if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
		syscon |= SYSCON_GPIT | SYSCON_GTCS_INT_GMII;
	else if (!strncmp(phy_mode, "rmii", strlen("rmii")))
		syscon |= SYSCON_GPIT | SYSCON_GTCS_EXT_GMII;
	free(phy_mode, M_TEMP, len);

	/* Delay property is in picoseconds; hardware steps are 100ps. */
	rx_delay = OF_getpropint(sc->sc_node, "allwinner,rx-delay-ps", 0);
	syscon |= ((rx_delay / 100) << SYSCON_ERXDC_SHIFT) & SYSCON_ERXDC_MASK;

	regmap_write_4(rm, SYSCON_GMAC, syscon);
}
593 
/* Read a 32-bit device register. */
uint32_t
dwxe_read(struct dwxe_softc *sc, bus_addr_t addr)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, addr);
}
599 
/* Write a 32-bit device register. */
void
dwxe_write(struct dwxe_softc *sc, bus_addr_t addr, uint32_t data)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, addr, data);
}
605 
606 void
dwxe_lladdr_read(struct dwxe_softc * sc,uint8_t * lladdr)607 dwxe_lladdr_read(struct dwxe_softc *sc, uint8_t *lladdr)
608 {
609 	uint32_t machi, maclo;
610 
611 	machi = dwxe_read(sc, DWXE_MACADDR_HI);
612 	maclo = dwxe_read(sc, DWXE_MACADDR_LO);
613 
614 	lladdr[0] = (maclo >> 0) & 0xff;
615 	lladdr[1] = (maclo >> 8) & 0xff;
616 	lladdr[2] = (maclo >> 16) & 0xff;
617 	lladdr[3] = (maclo >> 24) & 0xff;
618 	lladdr[4] = (machi >> 0) & 0xff;
619 	lladdr[5] = (machi >> 8) & 0xff;
620 }
621 
622 void
dwxe_lladdr_write(struct dwxe_softc * sc)623 dwxe_lladdr_write(struct dwxe_softc *sc)
624 {
625 	dwxe_write(sc, DWXE_MACADDR_HI,
626 	    sc->sc_lladdr[5] << 8 | sc->sc_lladdr[4] << 0);
627 	dwxe_write(sc, DWXE_MACADDR_LO,
628 	    sc->sc_lladdr[3] << 24 | sc->sc_lladdr[2] << 16 |
629 	    sc->sc_lladdr[1] << 8 | sc->sc_lladdr[0] << 0);
630 }
631 
/*
 * Transmit start routine (MPSAFE ifq variant): dequeue packets, map
 * them onto the Tx descriptor ring and kick the Tx DMA engine.
 */
void
dwxe_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct dwxe_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int error, idx, left, used;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	/* Number of free descriptors between producer and consumer. */
	idx = sc->sc_tx_prod;
	left = sc->sc_tx_cons;
	if (left <= idx)
		left += DWXE_NTXDESC;
	left -= idx;
	used = 0;

	for (;;) {
		/* Stop early if a worst-case packet no longer fits. */
		if (used + DWXE_NTXSEGS + 1 > left) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		error = dwxe_encap(sc, m, &idx, &used);
		if (error == EFBIG) {
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;

		/* Kick the Tx DMA engine to process the new descriptors. */
		dwxe_write(sc, DWXE_TX_CTL1, dwxe_read(sc,
		     DWXE_TX_CTL1) | DWXE_TX_CTL1_TX_DMA_START);
	}
}
689 
/*
 * Interface ioctl handler: interface up/down, media and Rx-ring
 * queries; everything else falls through to ether_ioctl().  ENETRESET
 * from any path is handled by reprogramming the Rx filter.
 */
int
dwxe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct dwxe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				dwxe_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dwxe_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		/* Re-apply the receive filter instead of a full reset. */
		if (ifp->if_flags & IFF_RUNNING)
			dwxe_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
739 
/*
 * Tx watchdog; fires when transmission started in dwxe_start() does
 * not complete within the 5-second if_timer window.  Currently only
 * logs; no recovery is attempted.
 */
void
dwxe_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}
745 
746 int
dwxe_media_change(struct ifnet * ifp)747 dwxe_media_change(struct ifnet *ifp)
748 {
749 	struct dwxe_softc *sc = ifp->if_softc;
750 
751 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
752 		mii_mediachg(&sc->sc_mii);
753 
754 	return (0);
755 }
756 
757 void
dwxe_media_status(struct ifnet * ifp,struct ifmediareq * ifmr)758 dwxe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
759 {
760 	struct dwxe_softc *sc = ifp->if_softc;
761 
762 	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
763 		mii_pollstat(&sc->sc_mii);
764 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
765 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
766 	}
767 }
768 
/*
 * MII read: issue an MDIO read command and poll (up to ~10ms) for the
 * busy bit to clear.  Returns the register value, or 0 on timeout.
 */
int
dwxe_mii_readreg(struct device *self, int phy, int reg)
{
	struct dwxe_softc *sc = (void *)self;
	int n;

	dwxe_write(sc, DWXE_MDIO_CMD,
	    sc->sc_clk << DWXE_MDIO_CMD_MDC_DIV_RATIO_M_SHIFT |
	    phy << DWXE_MDIO_CMD_PHY_ADDR_SHIFT |
	    reg << DWXE_MDIO_CMD_PHY_REG_SHIFT |
	    DWXE_MDIO_CMD_MII_BUSY);
	for (n = 0; n < 1000; n++) {
		if ((dwxe_read(sc, DWXE_MDIO_CMD) &
		    DWXE_MDIO_CMD_MII_BUSY) == 0)
			return dwxe_read(sc, DWXE_MDIO_DATA);
		delay(10);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}
790 
/*
 * MII write: load the data register, issue an MDIO write command and
 * poll (up to ~10ms) for the busy bit to clear.
 */
void
dwxe_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct dwxe_softc *sc = (void *)self;
	int n;

	dwxe_write(sc, DWXE_MDIO_DATA, val);
	dwxe_write(sc, DWXE_MDIO_CMD,
	    sc->sc_clk << DWXE_MDIO_CMD_MDC_DIV_RATIO_M_SHIFT |
	    phy << DWXE_MDIO_CMD_PHY_ADDR_SHIFT |
	    reg << DWXE_MDIO_CMD_PHY_REG_SHIFT |
	    DWXE_MDIO_CMD_MII_WRITE |
	    DWXE_MDIO_CMD_MII_BUSY);
	for (n = 0; n < 1000; n++) {
		if ((dwxe_read(sc, DWXE_MDIO_CMD) &
		    DWXE_MDIO_CMD_MII_BUSY) == 0)
			return;
		delay(10);
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}
813 
814 void
dwxe_mii_statchg(struct device * self)815 dwxe_mii_statchg(struct device *self)
816 {
817 	struct dwxe_softc *sc = (void *)self;
818 	uint32_t basicctrl;
819 
820 	basicctrl = dwxe_read(sc, DWXE_BASIC_CTL0);
821 	basicctrl &= ~DWXE_BASIC_CTL0_SPEED_MASK;
822 
823 	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
824 	case IFM_1000_SX:
825 	case IFM_1000_LX:
826 	case IFM_1000_CX:
827 	case IFM_1000_T:
828 		basicctrl |= DWXE_BASIC_CTL0_SPEED_1000;
829 		sc->sc_link = 1;
830 		break;
831 	case IFM_100_TX:
832 		basicctrl |= DWXE_BASIC_CTL0_SPEED_100;
833 		sc->sc_link = 1;
834 		break;
835 	case IFM_10_T:
836 		basicctrl |= DWXE_BASIC_CTL0_SPEED_10;
837 		sc->sc_link = 1;
838 		break;
839 	default:
840 		sc->sc_link = 0;
841 		return;
842 	}
843 
844 	if (sc->sc_link == 0)
845 		return;
846 
847 	basicctrl &= ~DWXE_BASIC_CTL0_DUPLEX;
848 	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
849 		basicctrl |= DWXE_BASIC_CTL0_DUPLEX;
850 
851 	/* XXX: RX/TX flow control? */
852 
853 	dwxe_write(sc, DWXE_BASIC_CTL0, basicctrl);
854 }
855 
/*
 * One-second periodic timeout: drive the MII autonegotiation state
 * machine and reschedule.
 */
void
dwxe_tick(void *arg)
{
	struct dwxe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}
868 
/*
 * Rx refill timeout: runs when the Rx ring drained completely (no
 * mbufs could be allocated).  Stops Rx DMA, rebuilds the ring from
 * scratch and restarts DMA with the previous control settings.
 */
void
dwxe_rxtick(void *arg)
{
	struct dwxe_softc *sc = arg;
	uint32_t ctl;
	int s;

	s = splnet();

	/* Halt Rx DMA while the ring is rewritten. */
	ctl = dwxe_read(sc, DWXE_RX_CTL1);
	dwxe_write(sc, DWXE_RX_CTL1, ctl & ~DWXE_RX_CTL1_RX_DMA_EN);

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_rxring),
	    0, DWXE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	dwxe_write(sc, DWXE_RX_DESC_LIST, 0);

	/* Rebuild the ring from index 0. */
	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwxe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_rxring),
	    0, DWXE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Point the hardware back at the ring and restore DMA state. */
	dwxe_write(sc, DWXE_RX_DESC_LIST, DWXE_DMA_DVA(sc->sc_rxring));
	dwxe_write(sc, DWXE_RX_CTL1, ctl);

	splx(s);
}
899 
900 int
dwxe_intr(void * arg)901 dwxe_intr(void *arg)
902 {
903 	struct dwxe_softc *sc = arg;
904 	uint32_t reg;
905 
906 	reg = dwxe_read(sc, DWXE_INT_STA);
907 	dwxe_write(sc, DWXE_INT_STA, reg);
908 
909 	if (reg & DWXE_INT_STA_RX_INT)
910 		dwxe_rx_proc(sc);
911 
912 	if (reg & DWXE_INT_STA_TX_INT ||
913 	    reg & DWXE_INT_STA_TX_BUF_UA_INT)
914 		dwxe_tx_proc(sc);
915 
916 	return (1);
917 }
918 
/*
 * Reclaim completed Tx descriptors: walk from the consumer index
 * until a descriptor still owned by hardware (DWXE_TX_DESC_CTL set)
 * is found, freeing mbufs and unloading DMA maps along the way.
 */
void
dwxe_tx_proc(struct dwxe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwxe_desc *txd;
	struct dwxe_buf *txb;
	int idx, txfree;

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_txring), 0,
	    DWXE_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	txfree = 0;
	while (sc->sc_tx_cons != sc->sc_tx_prod) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < DWXE_NTXDESC);

		/* Stop at the first descriptor the hardware still owns. */
		txd = &sc->sc_txdesc[idx];
		if (txd->sd_status & DWXE_TX_DESC_CTL)
			break;

		txb = &sc->sc_txbuf[idx];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		txfree++;

		/* Advance the consumer index with wrap-around. */
		if (sc->sc_tx_cons == (DWXE_NTXDESC - 1))
			sc->sc_tx_cons = 0;
		else
			sc->sc_tx_cons++;

		txd->sd_status = 0;
	}

	/* Ring fully drained: cancel the Tx watchdog. */
	if (sc->sc_tx_cons == sc->sc_tx_prod)
		ifp->if_timer = 0;

	/* Descriptors were freed; restart a stalled send queue. */
	if (txfree) {
		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}
}
968 
/*
 * Harvest received packets: walk the Rx ring from the consumer index
 * until a descriptor still owned by hardware is found, queueing the
 * mbufs to the stack and then refilling the ring.
 */
void
dwxe_rx_proc(struct dwxe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwxe_desc *rxd;
	struct dwxe_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int idx, len, cnt, put;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_rxring), 0,
	    DWXE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Process at most the number of descriptors currently filled. */
	cnt = if_rxr_inuse(&sc->sc_rx_ring);
	put = 0;
	while (put < cnt) {
		idx = sc->sc_rx_cons;
		KASSERT(idx < DWXE_NRXDESC);

		/* Stop at the first descriptor the hardware still owns. */
		rxd = &sc->sc_rxdesc[idx];
		if (rxd->sd_status & DWXE_RX_DESC_CTL)
			break;

		len = (rxd->sd_status >> DWXE_RX_FRM_LEN_SHIFT)
		    & DWXE_RX_FRM_LEN_MASK;
		rxb = &sc->sc_rxbuf[idx];
		KASSERT(rxb->tb_m);

		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
		    len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);

		/* Strip off CRC. */
		len -= ETHER_CRC_LEN;
		KASSERT(len > 0);

		m = rxb->tb_m;
		rxb->tb_m = NULL;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		/* Advance the consumer index with wrap-around. */
		put++;
		if (sc->sc_rx_cons == (DWXE_NRXDESC - 1))
			sc->sc_rx_cons = 0;
		else
			sc->sc_rx_cons++;
	}

	/* Hand the packets to the stack; back off if it is livelocked. */
	if_rxr_put(&sc->sc_rx_ring, put);
	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	dwxe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_rxring), 0,
	    DWXE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
1032 
void
dwxe_up(struct dwxe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwxe_buf *txb, *rxb;
	int i;

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = dwxe_dmamem_alloc(sc,
	    DWXE_NTXDESC * sizeof(struct dwxe_desc), 8);
	sc->sc_txdesc = DWXE_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct dwxe_buf) * DWXE_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < DWXE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, DWXE_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;

		/* Chain the descriptors into a circular list. */
		sc->sc_txdesc[i].sd_next =
		    DWXE_DMA_DVA(sc->sc_txring) +
		    ((i+1) % DWXE_NTXDESC) * sizeof(struct dwxe_desc);
	}

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_txring),
	    0, DWXE_DMA_LEN(sc->sc_txring), BUS_DMASYNC_PREWRITE);

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	/* Point the hardware at the Tx ring. */
	dwxe_write(sc, DWXE_TX_DESC_LIST, DWXE_DMA_DVA(sc->sc_txring));

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = dwxe_dmamem_alloc(sc,
	    DWXE_NRXDESC * sizeof(struct dwxe_desc), 8);
	sc->sc_rxdesc = DWXE_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct dwxe_buf) * DWXE_NRXDESC,
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < DWXE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;

		/* Chain the descriptors into a circular list. */
		sc->sc_rxdesc[i].sd_next =
		    DWXE_DMA_DVA(sc->sc_rxring) +
		    ((i+1) % DWXE_NRXDESC) * sizeof(struct dwxe_desc);
	}

	if_rxr_init(&sc->sc_rx_ring, 2, DWXE_NRXDESC);

	/* Populate the Rx ring with fresh mbufs. */
	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwxe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_rxring),
	    0, DWXE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Point the hardware at the Rx ring. */
	dwxe_write(sc, DWXE_RX_DESC_LIST, DWXE_DMA_DVA(sc->sc_rxring));

	/* Program the station MAC address. */
	dwxe_lladdr_write(sc);

	//dwxe_write(sc, DWXE_BASIC_CTL1, DWXE_BASIC_CTL1_SOFT_RST);

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	dwxe_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Unmask the interrupts we service in dwxe_intr(). */
	dwxe_write(sc, DWXE_INT_EN, DWXE_INT_EN_RX_INT |
	    DWXE_INT_EN_TX_INT | DWXE_INT_EN_TX_BUF_UA_INT);

	/* Start the Tx and Rx DMA engines... */
	dwxe_write(sc, DWXE_TX_CTL1, dwxe_read(sc, DWXE_TX_CTL1) |
	    DWXE_TX_CTL1_TX_MD | DWXE_TX_CTL1_TX_NEXT_FRM |
	    DWXE_TX_CTL1_TX_DMA_EN);
	dwxe_write(sc, DWXE_RX_CTL1, dwxe_read(sc, DWXE_RX_CTL1) |
	    DWXE_RX_CTL1_RX_MD | DWXE_RX_CTL1_RX_DMA_EN);

	/* ...then enable the MAC transmitter and receiver. */
	dwxe_write(sc, DWXE_TX_CTL0, dwxe_read(sc, DWXE_TX_CTL0) |
	    DWXE_TX_CTL0_TX_TRANSMITTER_EN);
	dwxe_write(sc, DWXE_RX_CTL0, dwxe_read(sc, DWXE_RX_CTL0) |
	    DWXE_RX_CTL0_RX_RECEIVER_EN | DWXE_RX_CTL0_RX_DO_CRC);

	/* Kick off the periodic link/statistics tick. */
	timeout_add_sec(&sc->sc_tick, 1);
}
1125 
void
dwxe_down(struct dwxe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwxe_buf *txb, *rxb;
	uint32_t dmactrl;
	int i;

	/* Cancel the refill and tick timeouts before tearing down. */
	timeout_del(&sc->sc_rxto);
	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Stop the Tx DMA engine and flush its FIFO. */
	dwxe_stop_dma(sc);

	/* Disable the MAC transmitter and receiver. */
	dwxe_write(sc, DWXE_TX_CTL0, dwxe_read(sc,
	    DWXE_TX_CTL0) & ~DWXE_TX_CTL0_TX_TRANSMITTER_EN);

	dwxe_write(sc, DWXE_RX_CTL0, dwxe_read(sc,
	    DWXE_RX_CTL0) & ~DWXE_RX_CTL0_RX_RECEIVER_EN);

	/* Disable both DMA engines. */
	dmactrl = dwxe_read(sc, DWXE_TX_CTL1);
	dmactrl &= ~DWXE_TX_CTL1_TX_DMA_EN;
	dwxe_write(sc, DWXE_TX_CTL1, dmactrl);

	dmactrl = dwxe_read(sc, DWXE_RX_CTL1);
	dmactrl &= ~DWXE_RX_CTL1_RX_DMA_EN;
	dwxe_write(sc, DWXE_RX_CTL1, dmactrl);

	/* Mask all interrupts. */
	dwxe_write(sc, DWXE_INT_EN, 0);

	/* Wait until the interrupt handler and queued starts are done. */
	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	/* Release all Tx buffers and their DMA maps. */
	for (i = 0; i < DWXE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	dwxe_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	/* Release all Rx buffers and their DMA maps. */
	for (i = 0; i < DWXE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	dwxe_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);
}
1190 
/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
static uint32_t
bitrev32(uint32_t x)
{
	/*
	 * Reverse the 32 bits by swapping progressively smaller groups:
	 * halves, bytes, nibbles, bit pairs and finally adjacent bits.
	 * The swap stages commute, so this order is equivalent to the
	 * classic small-to-large formulation.
	 */
	x = (x >> 16) | (x << 16);
	x = ((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8);
	x = ((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4);
	x = ((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2);

	return ((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1);
}
1202 
/*
 * Program the station address and the receive frame filter:
 * promiscuous/allmulti modes or the 64-bit multicast hash table.
 */
void
dwxe_iff(struct dwxe_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hash[2], hashbit, hashreg;
	uint32_t reg;

	reg = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;
	bzero(hash, sizeof(hash));
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Address ranges can't be hashed; accept all multicast. */
		ifp->if_flags |= IFF_ALLMULTI;
		reg |= DWXE_RX_FRM_FLT_RX_ALL_MULTICAST;
		if (ifp->if_flags & IFF_PROMISC)
			reg |= DWXE_RX_FRM_FLT_DIS_ADDR_FILTER;
	} else {
		reg |= DWXE_RX_FRM_FLT_HASH_MULTICAST;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & 0x7f;

			/*
			 * Top 6 bits of the bit-reversed, inverted CRC
			 * select one of the 64 hash table bits: bit 5
			 * picks the register, bits 0-4 the bit within it.
			 */
			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	dwxe_lladdr_write(sc);

	/*
	 * NOTE(review): hash[1] is written to HASH0 and hash[0] to
	 * HASH1 — presumably the hardware's word order is swapped
	 * relative to the hash index; confirm against the EMAC manual.
	 */
	dwxe_write(sc, DWXE_RX_HASH0, hash[1]);
	dwxe_write(sc, DWXE_RX_HASH1, hash[0]);

	dwxe_write(sc, DWXE_RX_FRM_FLT, reg);
}
1245 
/*
 * Map mbuf (chain) m for DMA and fill one Tx descriptor per segment,
 * starting at ring slot *idx.  On success advance *idx past the frame
 * and add the segment count to *used.  Returns 0, or EFBIG if the
 * mbuf cannot be loaded even after defragmentation.
 */
int
dwxe_encap(struct dwxe_softc *sc, struct mbuf *m, int *idx, int *used)
{
	struct dwxe_desc *txd, *txd_start;
	bus_dmamap_t map;
	int cur, frag, i;

	cur = frag = *idx;
	map = sc->sc_txbuf[cur].tb_map;

	/* Too many segments?  Compact the chain and try once more. */
	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
		if (m_defrag(m, M_DONTWAIT))
			return (EFBIG);
		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
			return (EFBIG);
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	txd = txd_start = &sc->sc_txdesc[frag];
	for (i = 0; i < map->dm_nsegs; i++) {
		txd->sd_addr = map->dm_segs[i].ds_addr;
		txd->sd_len = map->dm_segs[i].ds_len;
		if (i == 0)
			txd->sd_len |= DWXE_TX_FIR_DESC;
		if (i == (map->dm_nsegs - 1))
			txd->sd_len |= DWXE_TX_LAST_DESC | DWXE_TX_INT_CTL;
		/*
		 * Hand every descriptor but the first to the hardware
		 * now; the first one is flipped last (below) so the
		 * engine never sees a partially built chain.
		 */
		if (i != 0)
			txd->sd_status = DWXE_TX_DESC_CTL;

		bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_txring),
		    frag * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

		/* cur tracks the last slot used; frag wraps around. */
		cur = frag;
		if (frag == (DWXE_NTXDESC - 1)) {
			txd = &sc->sc_txdesc[0];
			frag = 0;
		} else {
			txd++;
			frag++;
		}
		KASSERT(frag != sc->sc_tx_cons);
	}

	/* Publish the frame by giving the first descriptor to the HW. */
	txd_start->sd_status = DWXE_TX_DESC_CTL;
	bus_dmamap_sync(sc->sc_dmat, DWXE_DMA_MAP(sc->sc_txring),
	    *idx * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

	/*
	 * The loaded map belongs to slot *idx but the mbuf is stored at
	 * the last slot (cur); swap the two maps so tx completion at
	 * cur unloads the correct map.
	 */
	KASSERT(sc->sc_txbuf[cur].tb_m == NULL);
	sc->sc_txbuf[*idx].tb_map = sc->sc_txbuf[cur].tb_map;
	sc->sc_txbuf[cur].tb_map = map;
	sc->sc_txbuf[cur].tb_m = m;

	*idx = frag;
	*used += map->dm_nsegs;

	return (0);
}
1306 
1307 void
dwxe_reset(struct dwxe_softc * sc)1308 dwxe_reset(struct dwxe_softc *sc)
1309 {
1310 	int n;
1311 
1312 	dwxe_stop_dma(sc);
1313 
1314 	dwxe_write(sc, DWXE_BASIC_CTL1, DWXE_BASIC_CTL1_SOFT_RST);
1315 
1316 	for (n = 0; n < 1000; n++) {
1317 		if ((dwxe_read(sc, DWXE_BASIC_CTL1) &
1318 		    DWXE_BASIC_CTL1_SOFT_RST) == 0)
1319 			return;
1320 		delay(10);
1321 	}
1322 
1323 	printf("%s: reset timeout\n", sc->sc_dev.dv_xname);
1324 }
1325 
1326 void
dwxe_stop_dma(struct dwxe_softc * sc)1327 dwxe_stop_dma(struct dwxe_softc *sc)
1328 {
1329 	uint32_t dmactrl;
1330 
1331 	/* Stop DMA. */
1332 	dmactrl = dwxe_read(sc, DWXE_TX_CTL1);
1333 	dmactrl &= ~DWXE_TX_CTL1_TX_DMA_EN;
1334 	dmactrl |= DWXE_TX_CTL1_TX_FIFO_FLUSH;
1335 	dwxe_write(sc, DWXE_TX_CTL1, dmactrl);
1336 }
1337 
/*
 * Allocate a physically contiguous DMA buffer of the given size and
 * alignment, map it into kernel virtual memory (coherent) and load it
 * into a DMA map.  Returns NULL on failure; partial state is unwound
 * in reverse order via the goto chain.
 */
struct dwxe_dmamem *
dwxe_dmamem_alloc(struct dwxe_softc *sc, bus_size_t size, bus_size_t align)
{
	struct dwxe_dmamem *tdm;
	int nsegs;

	tdm = malloc(sizeof(*tdm), M_DEVBUF, M_WAITOK | M_ZERO);
	tdm->tdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
		goto tdmfree;

	/* One segment only: the ring must be physically contiguous. */
	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &tdm->tdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &tdm->tdm_seg, nsegs, size,
	    &tdm->tdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, tdm->tdm_map, tdm->tdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(tdm->tdm_kva, size);

	return (tdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
tdmfree:
	free(tdm, M_DEVBUF, 0);

	return (NULL);
}
1378 
/*
 * Release a buffer obtained from dwxe_dmamem_alloc(): unmap the KVA,
 * free the backing pages, destroy the map and free the bookkeeping
 * structure — the reverse of the allocation order.
 */
void
dwxe_dmamem_free(struct dwxe_softc *sc, struct dwxe_dmamem *tdm)
{
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, tdm->tdm_size);
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
	free(tdm, M_DEVBUF, 0);
}
1387 
1388 struct mbuf *
dwxe_alloc_mbuf(struct dwxe_softc * sc,bus_dmamap_t map)1389 dwxe_alloc_mbuf(struct dwxe_softc *sc, bus_dmamap_t map)
1390 {
1391 	struct mbuf *m = NULL;
1392 
1393 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1394 	if (!m)
1395 		return (NULL);
1396 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1397 	m_adj(m, ETHER_ALIGN);
1398 
1399 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1400 		printf("%s: could not load mbuf DMA map", DEVNAME(sc));
1401 		m_freem(m);
1402 		return (NULL);
1403 	}
1404 
1405 	bus_dmamap_sync(sc->sc_dmat, map, 0,
1406 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1407 
1408 	return (m);
1409 }
1410 
/*
 * Refill empty Rx ring slots with fresh mbufs, up to the limit granted
 * by the rxr accounting, and hand the descriptors to the hardware.
 */
void
dwxe_fill_rx_ring(struct dwxe_softc *sc)
{
	struct dwxe_desc *rxd;
	struct dwxe_buf *rxb;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, DWXE_NRXDESC);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
		rxb->tb_m = dwxe_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
		/*
		 * NOTE(review): buffer size is programmed as ds_len - 1;
		 * presumably the hardware size field convention — confirm
		 * against the EMAC register description.
		 */
		rxd->sd_len = rxb->tb_map->dm_segs[0].ds_len - 1;
		rxd->sd_addr = rxb->tb_map->dm_segs[0].ds_addr;
		/* Give the descriptor to the DMA engine. */
		rxd->sd_status = DWXE_RX_DESC_CTL;

		if (sc->sc_rx_prod == (DWXE_NRXDESC - 1))
			sc->sc_rx_prod = 0;
		else
			sc->sc_rx_prod++;
	}
	/* Return any slots we could not fill. */
	if_rxr_put(&sc->sc_rx_ring, slots);

	/* Ring completely empty: retry allocation from a timeout. */
	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		timeout_add(&sc->sc_rxto, 1);
}
1440