xref: /openbsd-src/sys/dev/fdt/if_fec.c (revision 897fc685943471cf985a0fe38ba076ea6fe74fa5)
1 /* $OpenBSD: if_fec.c,v 1.2 2018/04/02 17:52:36 patrick Exp $ */
2 /*
3  * Copyright (c) 2012-2013 Patrick Wildt <patrick@blueri.se>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <sys/param.h>
19 #include <sys/systm.h>
20 #include <sys/sockio.h>
21 #include <sys/queue.h>
22 #include <sys/malloc.h>
23 #include <sys/device.h>
24 #include <sys/evcount.h>
25 #include <sys/socket.h>
26 #include <sys/timeout.h>
27 #include <sys/mbuf.h>
28 #include <machine/intr.h>
29 #include <machine/bus.h>
30 #include <machine/fdt.h>
31 
32 #include "bpfilter.h"
33 
34 #include <net/if.h>
35 #include <net/if_media.h>
36 #if NBPFILTER > 0
37 #include <net/bpf.h>
38 #endif
39 
40 #include <netinet/in.h>
41 #include <netinet/if_ether.h>
42 
43 #include <dev/mii/mii.h>
44 #include <dev/mii/miivar.h>
45 #include <dev/mii/miidevs.h>
46 
47 #include <dev/ofw/openfirm.h>
48 #include <dev/ofw/ofw_clock.h>
49 #include <dev/ofw/ofw_gpio.h>
50 #include <dev/ofw/ofw_pinctrl.h>
51 #include <dev/ofw/fdt.h>
52 
53 /* configuration registers */
54 #define ENET_EIR		0x004
55 #define ENET_EIMR		0x008
56 #define ENET_RDAR		0x010
57 #define ENET_TDAR		0x014
58 #define ENET_ECR		0x024
59 #define ENET_MMFR		0x040
60 #define ENET_MSCR		0x044
61 #define ENET_MIBC		0x064
62 #define ENET_RCR		0x084
63 #define ENET_TCR		0x0C4
64 #define ENET_PALR		0x0E4
65 #define ENET_PAUR		0x0E8
66 #define ENET_OPD		0x0EC
67 #define ENET_IAUR		0x118
68 #define ENET_IALR		0x11C
69 #define ENET_GAUR		0x120
70 #define ENET_GALR		0x124
71 #define ENET_TFWR		0x144
72 #define ENET_RDSR		0x180
73 #define ENET_TDSR		0x184
74 #define ENET_MRBR		0x188
75 #define ENET_RSFL		0x190
76 #define ENET_RSEM		0x194
77 #define ENET_RAEM		0x198
78 #define ENET_RAFL		0x19C
79 #define ENET_TSEM		0x1A0
80 #define ENET_TAEM		0x1A4
81 #define ENET_TAFL		0x1A8
82 #define ENET_TIPG		0x1AC
83 #define ENET_FTRL		0x1B0
84 #define ENET_TACC		0x1C0
85 #define ENET_RACC		0x1C4
86 
87 #define ENET_RDAR_RDAR		(1 << 24)
88 #define ENET_TDAR_TDAR		(1 << 24)
89 #define ENET_ECR_RESET		(1 << 0)
90 #define ENET_ECR_ETHEREN	(1 << 1)
91 #define ENET_ECR_EN1588		(1 << 4)
92 #define ENET_ECR_SPEED		(1 << 5)
93 #define ENET_ECR_DBSWP		(1 << 8)
94 #define ENET_MMFR_TA		(2 << 16)
95 #define ENET_MMFR_RA_SHIFT	18
96 #define ENET_MMFR_PA_SHIFT	23
97 #define ENET_MMFR_OP_WR		(1 << 28)
98 #define ENET_MMFR_OP_RD		(2 << 28)
99 #define ENET_MMFR_ST		(1 << 30)
100 #define ENET_RCR_MII_MODE	(1 << 2)
101 #define ENET_RCR_PROM		(1 << 3)
102 #define ENET_RCR_FCE		(1 << 5)
103 #define ENET_RCR_RGMII_MODE	(1 << 6)
104 #define ENET_RCR_RMII_10T	(1 << 9)
105 #define ENET_RCR_MAX_FL(x)	(((x) & 0x3fff) << 16)
106 #define ENET_TCR_FDEN		(1 << 2)
107 #define ENET_EIR_MII		(1 << 23)
108 #define ENET_EIR_RXF		(1 << 25)
109 #define ENET_EIR_TXF		(1 << 27)
110 #define ENET_TFWR_STRFWD	(1 << 8)
111 
112 /* statistics counters */
113 
114 /* 1588 control */
115 #define ENET_ATCR		0x400
116 #define ENET_ATVR		0x404
117 #define ENET_ATOFF		0x408
118 #define ENET_ATPER		0x40C
119 #define ENET_ATCOR		0x410
120 #define ENET_ATINC		0x414
121 #define ENET_ATSTMP		0x418
122 
123 /* capture / compare block */
124 #define ENET_TGSR		0x604
125 #define ENET_TCSR0		0x608
126 #define ENET_TCCR0		0x60C
127 #define ENET_TCSR1		0x610
128 #define ENET_TCCR1		0x614
129 #define ENET_TCSR2		0x618
130 #define ENET_TCCR2		0x61C
131 #define ENET_TCSR3		0x620
132 #define ENET_TCCR3		0x624
133 
134 #define ENET_MII_CLK		2500000
135 #define ENET_ALIGNMENT		16
136 
137 #define HREAD4(sc, reg)							\
138 	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
139 #define HWRITE4(sc, reg, val)						\
140 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
141 #define HSET4(sc, reg, bits)						\
142 	HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
143 #define HCLR4(sc, reg, bits)						\
144 	HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))
145 
146 /* what should we use? */
147 #define ENET_MAX_TXD		32
148 #define ENET_MAX_RXD		32
149 
150 #define ENET_MAX_PKT_SIZE	1536
151 
152 #define ENET_ROUNDUP(size, unit) (((size) + (unit) - 1) & ~((unit) - 1))
153 
154 /* buffer descriptor status bits */
155 #define ENET_RXD_EMPTY		(1 << 15)
156 #define ENET_RXD_WRAP		(1 << 13)
157 #define ENET_RXD_LAST		(1 << 11)
158 #define ENET_RXD_MISS		(1 << 8)
159 #define ENET_RXD_BC		(1 << 7)
160 #define ENET_RXD_MC		(1 << 6)
161 #define ENET_RXD_LG		(1 << 5)
162 #define ENET_RXD_NO		(1 << 4)
163 #define ENET_RXD_CR		(1 << 2)
164 #define ENET_RXD_OV		(1 << 1)
165 #define ENET_RXD_TR		(1 << 0)
166 
167 #define ENET_TXD_READY		(1 << 15)
168 #define ENET_TXD_WRAP		(1 << 13)
169 #define ENET_TXD_LAST		(1 << 11)
170 #define ENET_TXD_TC		(1 << 10)
171 #define ENET_TXD_ABC		(1 << 9)
172 #define ENET_TXD_STATUS_MASK	0x3ff
173 
174 #ifdef ENET_ENHANCED_BD
175 /* enhanced */
176 #define ENET_RXD_INT		(1 << 23)
177 
178 #define ENET_TXD_INT		(1 << 30)
179 #endif
180 
181 /*
182  * Bus dma allocation structure used by
183  * fec_dma_malloc and fec_dma_free.
184  */
struct fec_dma_alloc {
	bus_addr_t		dma_paddr;	/* device (bus) address */
	caddr_t			dma_vaddr;	/* kernel virtual address */
	bus_dma_tag_t		dma_tag;	/* tag the memory came from */
	bus_dmamap_t		dma_map;	/* map loaded with dma_vaddr */
	bus_dma_segment_t	dma_seg;	/* single backing segment */
	bus_size_t		dma_size;	/* size of the allocation */
	int			dma_nseg;	/* segment count (always 1) */
};
194 
/*
 * Rx/tx buffer descriptor as consumed by the ENET DMA engine; the
 * rings programmed into ENET_TDSR/ENET_RDSR are arrays of these.
 * The longer "enhanced" layout is used when ENET_ENHANCED_BD is set.
 */
struct fec_buf_desc {
	uint16_t data_length;		/* payload's length in bytes */
	uint16_t status;		/* BD's status (see datasheet) */
	uint32_t data_pointer;		/* payload's buffer address */
#ifdef ENET_ENHANCED_BD
	uint32_t enhanced_status;	/* enhanced status with IEEE 1588 */
	uint32_t reserved0;		/* reserved */
	uint32_t update_done;		/* buffer descriptor update done */
	uint32_t timestamp;		/* IEEE 1588 timestamp */
	uint32_t reserved1[2];		/* reserved */
#endif
};
207 
/* Fixed-size packet bounce buffer; one per ring descriptor. */
struct fec_buffer {
	uint8_t data[ENET_MAX_PKT_SIZE];
};
211 
struct fec_softc {
	struct device		sc_dev;		/* base device */
	struct arpcom		sc_ac;		/* ethernet common data */
	struct mii_data		sc_mii;		/* MII bus state */
	int			sc_node;	/* device tree node */
	bus_space_tag_t		sc_iot;		/* register space tag */
	bus_space_handle_t	sc_ioh;		/* register space handle */
	void			*sc_ih; /* Interrupt handler */
	bus_dma_tag_t		sc_dma_tag;	/* parent DMA tag */
	struct fec_dma_alloc	txdma;		/* bus_dma glue for tx desc */
	struct fec_buf_desc	*tx_desc_base;
	struct fec_dma_alloc	rxdma;		/* bus_dma glue for rx desc */
	struct fec_buf_desc	*rx_desc_base;
	struct fec_dma_alloc	tbdma;		/* bus_dma glue for packets */
	struct fec_buffer	*tx_buffer_base;
	struct fec_dma_alloc	rbdma;		/* bus_dma glue for packets */
	struct fec_buffer	*rx_buffer_base;
	int			cur_tx;		/* next tx slot to fill */
	int			cur_rx;		/* next rx slot to check */
	struct timeout		sc_tick;	/* periodic mii_tick() */
	uint32_t		sc_phy_speed;	/* MSCR MII clock divider */
};
234 
235 struct fec_softc *fec_sc;
236 
237 int fec_match(struct device *, void *, void *);
238 void fec_attach(struct device *, struct device *, void *);
239 void fec_phy_init(struct fec_softc *, struct mii_softc *);
240 int fec_ioctl(struct ifnet *, u_long, caddr_t);
241 void fec_start(struct ifnet *);
242 int fec_encap(struct fec_softc *, struct mbuf *);
243 void fec_init_txd(struct fec_softc *);
244 void fec_init_rxd(struct fec_softc *);
245 void fec_init(struct fec_softc *);
246 void fec_stop(struct fec_softc *);
247 void fec_iff(struct fec_softc *);
248 struct mbuf * fec_newbuf(void);
249 int fec_intr(void *);
250 void fec_recv(struct fec_softc *);
251 void fec_tick(void *);
252 int fec_miibus_readreg(struct device *, int, int);
253 void fec_miibus_writereg(struct device *, int, int, int);
254 void fec_miibus_statchg(struct device *);
255 int fec_ifmedia_upd(struct ifnet *);
256 void fec_ifmedia_sts(struct ifnet *, struct ifmediareq *);
257 int fec_dma_malloc(struct fec_softc *, bus_size_t, struct fec_dma_alloc *);
258 void fec_dma_free(struct fec_softc *, struct fec_dma_alloc *);
259 
/* Autoconf attachment glue. */
struct cfattach fec_ca = {
	sizeof (struct fec_softc), fec_match, fec_attach
};

struct cfdriver fec_cd = {
	NULL, "fec", DV_IFNET
};
267 
268 int
269 fec_match(struct device *parent, void *match, void *aux)
270 {
271 	struct fdt_attach_args *faa = aux;
272 
273 	return (OF_is_compatible(faa->fa_node, "fsl,imx6q-fec") ||
274 	    OF_is_compatible(faa->fa_node, "fsl,imx8mq-fec"));
275 }
276 
/*
 * Map the MAC registers, reset the external PHY and the controller,
 * allocate the descriptor/buffer DMA rings and attach the network
 * interface and MII bus.
 */
void
fec_attach(struct device *parent, struct device *self, void *aux)
{
	struct fec_softc *sc = (struct fec_softc *) self;
	struct fdt_attach_args *faa = aux;
	struct mii_data *mii;
	struct mii_softc *child;
	struct ifnet *ifp;
	int tsize, rsize, tbsize, rbsize, s;
	uint32_t phy_reset_gpio[3];
	uint32_t phy_reset_duration;

	/* We need at least one register window. */
	if (faa->fa_nreg < 1)
		return;

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("fec_attach: bus_space_map failed!");

	sc->sc_dma_tag = faa->fa_dmat;

	/* Apply the default pin configuration from the device tree. */
	pinctrl_byname(faa->fa_node, "default");

	/* power it up */
	clock_enable_all(faa->fa_node);

	/* reset PHY */
	if (OF_getpropintarray(faa->fa_node, "phy-reset-gpios", phy_reset_gpio,
	    sizeof(phy_reset_gpio)) == sizeof(phy_reset_gpio)) {
		/* Reset pulse length in ms; clamp nonsense values. */
		phy_reset_duration = OF_getpropint(faa->fa_node,
		    "phy-reset-duration", 1);
		if (phy_reset_duration > 1000)
			phy_reset_duration = 1;

		/*
		 * The Linux people really screwed the pooch here.
		 * The Linux kernel always treats the gpio as
		 * active-low, even if it is marked as active-high in
		 * the device tree.  As a result the device tree for
		 * many boards incorrectly marks the gpio as
		 * active-high.
		 */
		phy_reset_gpio[2] = GPIO_ACTIVE_LOW;
		gpio_controller_config_pin(phy_reset_gpio, GPIO_CONFIG_OUTPUT);

		/*
		 * On some Cubox-i machines we need to hold the PHY in
		 * reset a little bit longer than specified.
		 */
		gpio_controller_set_pin(phy_reset_gpio, 1);
		delay((phy_reset_duration + 1) * 1000);
		gpio_controller_set_pin(phy_reset_gpio, 0);
		delay(1000);
	}
	printf("\n");

	/* Figure out the hardware address. Must happen before reset. */
	OF_getprop(faa->fa_node, "local-mac-address", sc->sc_ac.ac_enaddr,
	    sizeof(sc->sc_ac.ac_enaddr));

	/* reset the controller */
	HSET4(sc, ENET_ECR, ENET_ECR_RESET);
	while (HREAD4(sc, ENET_ECR) & ENET_ECR_ETHEREN)
		continue;

	/* Mask and acknowledge all interrupts until fec_init(). */
	HWRITE4(sc, ENET_EIMR, 0);
	HWRITE4(sc, ENET_EIR, 0xffffffff);

	sc->sc_ih = arm_intr_establish_fdt(faa->fa_node, IPL_NET,
	    fec_intr, sc, sc->sc_dev.dv_xname);

	/* tx descriptor ring, rounded up to whole pages */
	tsize = ENET_MAX_TXD * sizeof(struct fec_buf_desc);
	tsize = ENET_ROUNDUP(tsize, PAGE_SIZE);

	if (fec_dma_malloc(sc, tsize, &sc->txdma)) {
		printf("%s: Unable to allocate tx_desc memory\n",
		    sc->sc_dev.dv_xname);
		goto bad;
	}
	sc->tx_desc_base = (struct fec_buf_desc *)sc->txdma.dma_vaddr;

	/* rx descriptor ring */
	rsize = ENET_MAX_RXD * sizeof(struct fec_buf_desc);
	rsize = ENET_ROUNDUP(rsize, PAGE_SIZE);

	if (fec_dma_malloc(sc, rsize, &sc->rxdma)) {
		printf("%s: Unable to allocate rx_desc memory\n",
		    sc->sc_dev.dv_xname);
		goto txdma;
	}
	sc->rx_desc_base = (struct fec_buf_desc *)sc->rxdma.dma_vaddr;

	/* tx packet bounce buffers, one per descriptor */
	tbsize = ENET_MAX_TXD * ENET_MAX_PKT_SIZE;
	tbsize = ENET_ROUNDUP(tbsize, PAGE_SIZE);

	if (fec_dma_malloc(sc, tbsize, &sc->tbdma)) {
		printf("%s: Unable to allocate tx_buffer memory\n",
		    sc->sc_dev.dv_xname);
		goto rxdma;
	}
	sc->tx_buffer_base = (struct fec_buffer *)sc->tbdma.dma_vaddr;

	/* rx packet bounce buffers, one per descriptor */
	rbsize = ENET_MAX_RXD * ENET_MAX_PKT_SIZE;
	rbsize = ENET_ROUNDUP(rbsize, PAGE_SIZE);

	if (fec_dma_malloc(sc, rbsize, &sc->rbdma)) {
		printf("%s: Unable to allocate rx_buffer memory\n",
		    sc->sc_dev.dv_xname);
		goto tbdma;
	}
	sc->rx_buffer_base = (struct fec_buffer *)sc->rbdma.dma_vaddr;

	sc->cur_tx = 0;
	sc->cur_rx = 0;

	s = splnet();

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = fec_ioctl;
	ifp->if_start = fec_start;
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf("%s: address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/*
	 * Initialize the MII clock.  The formula is:
	 *
	 * ENET_MII_CLK = ref_freq / ((phy_speed + 1) x 2)
	 * phy_speed = (((ref_freq / ENET_MII_CLK) / 2) - 1)
	 */
	sc->sc_phy_speed = clock_get_frequency(sc->sc_node, "ipg");
	sc->sc_phy_speed = (sc->sc_phy_speed + (ENET_MII_CLK - 1)) / ENET_MII_CLK;
	sc->sc_phy_speed = (sc->sc_phy_speed / 2) - 1;
	HWRITE4(sc, ENET_MSCR, (sc->sc_phy_speed << 1) | 0x100);

	/* Initialize MII/media info. */
	mii = &sc->sc_mii;
	mii->mii_ifp = ifp;
	mii->mii_readreg = fec_miibus_readreg;
	mii->mii_writereg = fec_miibus_writereg;
	mii->mii_statchg = fec_miibus_statchg;

	ifmedia_init(&mii->mii_media, 0, fec_ifmedia_upd, fec_ifmedia_sts);
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	/* Apply PHY-model specific fixups before the first media change. */
	child = LIST_FIRST(&mii->mii_phys);
	if (child)
		fec_phy_init(sc, child);

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY found; expose a dummy "none" media entry. */
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);
	splx(s);

	timeout_set(&sc->sc_tick, fec_tick, sc);

	fec_sc = sc;
	return;

	/* Unwind partial DMA allocations in reverse order. */
tbdma:
	fec_dma_free(sc, &sc->tbdma);
rxdma:
	fec_dma_free(sc, &sc->rxdma);
txdma:
	fec_dma_free(sc, &sc->txdma);
bad:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, faa->fa_reg[0].size);
}
455 
456 void
457 fec_phy_init(struct fec_softc *sc, struct mii_softc *child)
458 {
459 	struct device *dev = (struct device *)sc;
460 	int phy = child->mii_phy;
461 	uint32_t reg;
462 
463 	if (child->mii_oui == MII_OUI_ATHEROS &&
464 	    child->mii_model == MII_MODEL_ATHEROS_AR8035) {
465 		/* disable SmartEEE */
466 		fec_miibus_writereg(dev, phy, 0x0d, 0x0003);
467 		fec_miibus_writereg(dev, phy, 0x0e, 0x805d);
468 		fec_miibus_writereg(dev, phy, 0x0d, 0x4003);
469 		reg = fec_miibus_readreg(dev, phy, 0x0e);
470 		fec_miibus_writereg(dev, phy, 0x0e, reg & ~0x0100);
471 
472 		/* enable 125MHz clk output */
473 		fec_miibus_writereg(dev, phy, 0x0d, 0x0007);
474 		fec_miibus_writereg(dev, phy, 0x0e, 0x8016);
475 		fec_miibus_writereg(dev, phy, 0x0d, 0x4007);
476 
477 		reg = fec_miibus_readreg(dev, phy, 0x0e) & 0xffe3;
478 		fec_miibus_writereg(dev, phy, 0x0e, reg | 0x18);
479 
480 		/* tx clock delay */
481 		fec_miibus_writereg(dev, phy, 0x1d, 0x0005);
482 		reg = fec_miibus_readreg(dev, phy, 0x1e);
483 		fec_miibus_writereg(dev, phy, 0x1e, reg | 0x0100);
484 
485 		PHY_RESET(child);
486 	}
487 
488 	if (child->mii_oui == MII_OUI_MICREL &&
489 	    child->mii_model == MII_MODEL_MICREL_KSZ9021) {
490 		uint32_t rxc, rxdv, txc, txen;
491 		uint32_t rxd0, rxd1, rxd2, rxd3;
492 		uint32_t txd0, txd1, txd2, txd3;
493 		uint32_t val;
494 
495 		rxc = OF_getpropint(sc->sc_node, "rxc-skew-ps", 1400) / 200;
496 		rxdv = OF_getpropint(sc->sc_node, "rxdv-skew-ps", 1400) / 200;
497 		txc = OF_getpropint(sc->sc_node, "txc-skew-ps", 1400) / 200;
498 		txen = OF_getpropint(sc->sc_node, "txen-skew-ps", 1400) / 200;
499 		rxd0 = OF_getpropint(sc->sc_node, "rxd0-skew-ps", 1400) / 200;
500 		rxd1 = OF_getpropint(sc->sc_node, "rxd1-skew-ps", 1400) / 200;
501 		rxd2 = OF_getpropint(sc->sc_node, "rxd2-skew-ps", 1400) / 200;
502 		rxd3 = OF_getpropint(sc->sc_node, "rxd3-skew-ps", 1400) / 200;
503 		txd0 = OF_getpropint(sc->sc_node, "txd0-skew-ps", 1400) / 200;
504 		txd1 = OF_getpropint(sc->sc_node, "txd1-skew-ps", 1400) / 200;
505 		txd2 = OF_getpropint(sc->sc_node, "txd2-skew-ps", 1400) / 200;
506 		txd3 = OF_getpropint(sc->sc_node, "txd3-skew-ps", 1400) / 200;
507 
508 		val = ((rxc & 0xf) << 12) | ((rxdv & 0xf) << 8) |
509 		    ((txc & 0xf) << 4) | ((txen & 0xf) << 0);
510 		fec_miibus_writereg(dev, phy, 0x0b, 0x8104);
511 		fec_miibus_writereg(dev, phy, 0x0c, val);
512 
513 		val = ((rxd3 & 0xf) << 12) | ((rxd2 & 0xf) << 8) |
514 		    ((rxd1 & 0xf) << 4) | ((rxd0 & 0xf) << 0);
515 		fec_miibus_writereg(dev, phy, 0x0b, 0x8105);
516 		fec_miibus_writereg(dev, phy, 0x0c, val);
517 
518 		val = ((txd3 & 0xf) << 12) | ((txd2 & 0xf) << 8) |
519 		    ((txd1 & 0xf) << 4) | ((txd0 & 0xf) << 0);
520 		fec_miibus_writereg(dev, phy, 0x0b, 0x8106);
521 		fec_miibus_writereg(dev, phy, 0x0c, val);
522 	}
523 
524 	if (child->mii_oui == MII_OUI_MICREL &&
525 	    child->mii_model == MII_MODEL_MICREL_KSZ9031) {
526 		uint32_t rxc, rxdv, txc, txen;
527 		uint32_t rxd0, rxd1, rxd2, rxd3;
528 		uint32_t txd0, txd1, txd2, txd3;
529 		uint32_t val;
530 
531 		rxc = OF_getpropint(sc->sc_node, "rxc-skew-ps", 900) / 60;
532 		rxdv = OF_getpropint(sc->sc_node, "rxdv-skew-ps", 420) / 60;
533 		txc = OF_getpropint(sc->sc_node, "txc-skew-ps", 900) / 60;
534 		txen = OF_getpropint(sc->sc_node, "txen-skew-ps", 420) / 60;
535 		rxd0 = OF_getpropint(sc->sc_node, "rxd0-skew-ps", 420) / 60;
536 		rxd1 = OF_getpropint(sc->sc_node, "rxd1-skew-ps", 420) / 60;
537 		rxd2 = OF_getpropint(sc->sc_node, "rxd2-skew-ps", 420) / 60;
538 		rxd3 = OF_getpropint(sc->sc_node, "rxd3-skew-ps", 420) / 60;
539 		txd0 = OF_getpropint(sc->sc_node, "txd0-skew-ps", 420) / 60;
540 		txd1 = OF_getpropint(sc->sc_node, "txd1-skew-ps", 420) / 60;
541 		txd2 = OF_getpropint(sc->sc_node, "txd2-skew-ps", 420) / 60;
542 		txd3 = OF_getpropint(sc->sc_node, "txd3-skew-ps", 420) / 60;
543 
544 		val = ((rxdv & 0xf) << 4) || ((txen & 0xf) << 0);
545 		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
546 		fec_miibus_writereg(dev, phy, 0x0e, 0x0004);
547 		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
548 		fec_miibus_writereg(dev, phy, 0x0e, val);
549 
550 		val = ((rxd3 & 0xf) << 12) | ((rxd2 & 0xf) << 8) |
551 		    ((rxd1 & 0xf) << 4) | ((rxd0 & 0xf) << 0);
552 		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
553 		fec_miibus_writereg(dev, phy, 0x0e, 0x0005);
554 		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
555 		fec_miibus_writereg(dev, phy, 0x0e, val);
556 
557 		val = ((txd3 & 0xf) << 12) | ((txd2 & 0xf) << 8) |
558 		    ((txd1 & 0xf) << 4) | ((txd0 & 0xf) << 0);
559 		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
560 		fec_miibus_writereg(dev, phy, 0x0e, 0x0006);
561 		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
562 		fec_miibus_writereg(dev, phy, 0x0e, val);
563 
564 		val = ((txc & 0x1f) << 5) || ((rxc & 0x1f) << 0);
565 		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
566 		fec_miibus_writereg(dev, phy, 0x0e, 0x0008);
567 		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
568 		fec_miibus_writereg(dev, phy, 0x0e, val);
569 	}
570 }
571 
572 void
573 fec_init_rxd(struct fec_softc *sc)
574 {
575 	int i;
576 
577 	memset(sc->rx_desc_base, 0, ENET_MAX_RXD * sizeof(struct fec_buf_desc));
578 
579 	for (i = 0; i < ENET_MAX_RXD; i++)
580 	{
581 		sc->rx_desc_base[i].status = ENET_RXD_EMPTY;
582 		sc->rx_desc_base[i].data_pointer = sc->rbdma.dma_paddr + i * ENET_MAX_PKT_SIZE;
583 #ifdef ENET_ENHANCED_BD
584 		sc->rx_desc_base[i].enhanced_status = ENET_RXD_INT;
585 #endif
586 	}
587 
588 	sc->rx_desc_base[i - 1].status |= ENET_RXD_WRAP;
589 }
590 
591 void
592 fec_init_txd(struct fec_softc *sc)
593 {
594 	int i;
595 
596 	memset(sc->tx_desc_base, 0, ENET_MAX_TXD * sizeof(struct fec_buf_desc));
597 
598 	for (i = 0; i < ENET_MAX_TXD; i++)
599 	{
600 		sc->tx_desc_base[i].data_pointer = sc->tbdma.dma_paddr + i * ENET_MAX_PKT_SIZE;
601 	}
602 
603 	sc->tx_desc_base[i - 1].status |= ENET_TXD_WRAP;
604 }
605 
/*
 * (Re)initialize the controller: reset the MAC, program the station
 * address and descriptor rings, configure media-dependent bits and
 * enable the receiver, transmitter and rx/tx interrupts.
 */
void
fec_init(struct fec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int speed = 0;

	/* reset the controller */
	HSET4(sc, ENET_ECR, ENET_ECR_RESET);
	while (HREAD4(sc, ENET_ECR) & ENET_ECR_ETHEREN)
		continue;

	/* set hw address */
	HWRITE4(sc, ENET_PALR,
	    (sc->sc_ac.ac_enaddr[0] << 24) |
	    (sc->sc_ac.ac_enaddr[1] << 16) |
	    (sc->sc_ac.ac_enaddr[2] << 8) |
	     sc->sc_ac.ac_enaddr[3]);
	HWRITE4(sc, ENET_PAUR,
	    (sc->sc_ac.ac_enaddr[4] << 24) |
	    (sc->sc_ac.ac_enaddr[5] << 16));

	/* clear outstanding interrupts */
	HWRITE4(sc, ENET_EIR, 0xffffffff);

	/* set max receive buffer size, 3-0 bits always zero for alignment */
	HWRITE4(sc, ENET_MRBR, ENET_MAX_PKT_SIZE);

	/* set descriptor */
	HWRITE4(sc, ENET_TDSR, sc->txdma.dma_paddr);
	HWRITE4(sc, ENET_RDSR, sc->rxdma.dma_paddr);

	/* init descriptor */
	fec_init_txd(sc);
	fec_init_rxd(sc);

	/* set it to full-duplex */
	HWRITE4(sc, ENET_TCR, ENET_TCR_FDEN);

	/*
	 * Set max frame length to 1518 or 1522 with VLANs,
	 * pause frames and promisc mode.
	 * XXX: RGMII mode - phy dependant
	 */
	HWRITE4(sc, ENET_RCR,
	    ENET_RCR_MAX_FL(1522) | ENET_RCR_RGMII_MODE | ENET_RCR_MII_MODE |
	    ENET_RCR_FCE);

	/* restore the MII clock divider computed in fec_attach() */
	HWRITE4(sc, ENET_MSCR, (sc->sc_phy_speed << 1) | 0x100);

	/* RX FIFO treshold and pause */
	HWRITE4(sc, ENET_RSEM, 0x84);
	HWRITE4(sc, ENET_RSFL, 16);
	HWRITE4(sc, ENET_RAEM, 8);
	HWRITE4(sc, ENET_RAFL, 8);
	HWRITE4(sc, ENET_OPD, 0xFFF0);

	/* do store and forward, only i.MX6, needs to be set correctly else */
	HWRITE4(sc, ENET_TFWR, ENET_TFWR_STRFWD);

	/* enable gigabit-ethernet and set it to support little-endian */
	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_T:  /* Gigabit */
		speed |= ENET_ECR_SPEED;
		break;
	default:
		speed &= ~ENET_ECR_SPEED;
	}
	HWRITE4(sc, ENET_ECR, ENET_ECR_ETHEREN | speed | ENET_ECR_DBSWP);

#ifdef ENET_ENHANCED_BD
	HSET4(sc, ENET_ECR, ENET_ECR_EN1588);
#endif

	/* rx descriptors are ready */
	HWRITE4(sc, ENET_RDAR, ENET_RDAR_RDAR);

	/* program promiscuous mode and multicast filters */
	fec_iff(sc);

	/* start periodic MII link polling */
	timeout_add_sec(&sc->sc_tick, 1);

	/* Indicate we are up and running. */
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* enable interrupts for tx/rx */
	HWRITE4(sc, ENET_EIMR, ENET_EIR_TXF | ENET_EIR_RXF);

	/* kick the transmit path in case packets queued while down */
	fec_start(ifp);
}
696 
/*
 * Take the interface down: stop the MII tick timer and hold the MAC
 * in reset, which also stops all DMA.
 */
void
fec_stop(struct fec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_del(&sc->sc_tick);

	/* reset the controller */
	HSET4(sc, ENET_ECR, ENET_ECR_RESET);
	while (HREAD4(sc, ENET_ECR) & ENET_ECR_ETHEREN)
		continue;
}
716 
717 void
718 fec_iff(struct fec_softc *sc)
719 {
720 	struct arpcom *ac = &sc->sc_ac;
721 	struct ifnet *ifp = &sc->sc_ac.ac_if;
722 	struct ether_multi *enm;
723 	struct ether_multistep step;
724 	uint64_t ghash = 0, ihash = 0;
725 	uint32_t h;
726 
727 	ifp->if_flags &= ~IFF_ALLMULTI;
728 
729 	if (ifp->if_flags & IFF_PROMISC) {
730 		ifp->if_flags |= IFF_ALLMULTI;
731 		ihash = 0xffffffffffffffffLLU;
732 	} else if (ac->ac_multirangecnt > 0) {
733 		ifp->if_flags |= IFF_ALLMULTI;
734 		ghash = 0xffffffffffffffffLLU;
735 	} else {
736 		ETHER_FIRST_MULTI(step, ac, enm);
737 		while (enm != NULL) {
738 			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
739 
740 			ghash |= 1LLU << (((uint8_t *)&h)[3] >> 2);
741 
742 			ETHER_NEXT_MULTI(step, enm);
743 		}
744 	}
745 
746 	HWRITE4(sc, ENET_GAUR, (uint32_t)(ghash >> 32));
747 	HWRITE4(sc, ENET_GALR, (uint32_t)ghash);
748 
749 	HWRITE4(sc, ENET_IAUR, (uint32_t)(ihash >> 32));
750 	HWRITE4(sc, ENET_IALR, (uint32_t)ihash);
751 }
752 
753 int
754 fec_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
755 {
756 	struct fec_softc *sc = ifp->if_softc;
757 	struct ifreq *ifr = (struct ifreq *)data;
758 	int s, error = 0;
759 
760 	s = splnet();
761 
762 	switch (cmd) {
763 	case SIOCSIFADDR:
764 		ifp->if_flags |= IFF_UP;
765 		if (!(ifp->if_flags & IFF_RUNNING))
766 			fec_init(sc);
767 		break;
768 
769 	case SIOCSIFFLAGS:
770 		if (ifp->if_flags & IFF_UP) {
771 			if (ifp->if_flags & IFF_RUNNING)
772 				error = ENETRESET;
773 			else
774 				fec_init(sc);
775 		} else {
776 			if (ifp->if_flags & IFF_RUNNING)
777 				fec_stop(sc);
778 		}
779 		break;
780 
781 	case SIOCGIFMEDIA:
782 	case SIOCSIFMEDIA:
783 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
784 		break;
785 
786 	default:
787 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
788 	}
789 
790 	if (error == ENETRESET) {
791 		if (ifp->if_flags & IFF_RUNNING)
792 			fec_iff(sc);
793 		error = 0;
794 	}
795 
796 	splx(s);
797 	return(error);
798 }
799 
800 void
801 fec_start(struct ifnet *ifp)
802 {
803 	struct fec_softc *sc = ifp->if_softc;
804 	struct mbuf *m_head = NULL;
805 
806 	if (ifq_is_oactive(&ifp->if_snd) || !(ifp->if_flags & IFF_RUNNING))
807 		return;
808 
809 	for (;;) {
810 		m_head = ifq_deq_begin(&ifp->if_snd);
811 		if (m_head == NULL)
812 			break;
813 
814 		if (fec_encap(sc, m_head)) {
815 			ifq_deq_rollback(&ifp->if_snd, m_head);
816 			ifq_set_oactive(&ifp->if_snd);
817 			break;
818 		}
819 
820 		ifq_deq_commit(&ifp->if_snd, m_head);
821 
822 #if NBPFILTER > 0
823 		if (ifp->if_bpf)
824 			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
825 #endif
826 
827 		m_freem(m_head);
828 	}
829 }
830 
/*
 * Copy one outgoing packet into the bounce buffer of the tx slot at
 * cur_tx and hand the descriptor to the hardware.  Returns 0 on
 * success, or EIO when the slot is still owned by the hardware (ring
 * full) or the packet does not fit; the caller keeps ownership of the
 * mbuf in every case.
 */
int
fec_encap(struct fec_softc *sc, struct mbuf *m)
{
	/* Slot still owned by the hardware: the ring is full. */
	if (sc->tx_desc_base[sc->cur_tx].status & ENET_TXD_READY) {
		printf("fec: tx queue full!\n");
		return EIO;
	}

	/* Each slot has a fixed ENET_MAX_PKT_SIZE bounce buffer. */
	if (m->m_pkthdr.len > ENET_MAX_PKT_SIZE) {
		printf("fec: packet too big\n");
		return EIO;
	}

	/* copy in the actual packet */
	m_copydata(m, 0, m->m_pkthdr.len, (caddr_t)sc->tx_buffer_base[sc->cur_tx].data);

	sc->tx_desc_base[sc->cur_tx].data_length = m->m_pkthdr.len;

	/* Hand the slot to the hardware as a single, final fragment. */
	sc->tx_desc_base[sc->cur_tx].status &= ~ENET_TXD_STATUS_MASK;
	sc->tx_desc_base[sc->cur_tx].status |= (ENET_TXD_READY | ENET_TXD_LAST | ENET_TXD_TC);

#ifdef ENET_ENHANCED_BD
	sc->tx_desc_base[sc->cur_tx].enhanced_status = ENET_TXD_INT;
	sc->tx_desc_base[sc->cur_tx].update_done = 0;
#endif

	/*
	 * NOTE(review): these syncs run before the device consumes the
	 * buffer, yet use POSTREAD|POSTWRITE; PREREAD|PREWRITE would be
	 * conventional.  The memory is mapped BUS_DMA_COHERENT (see
	 * fec_dma_malloc()), which presumably makes this moot -- verify.
	 */
	bus_dmamap_sync(sc->tbdma.dma_tag, sc->tbdma.dma_map,
	    ENET_MAX_PKT_SIZE * sc->cur_tx, ENET_MAX_PKT_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map,
	    sizeof(struct fec_buf_desc) * sc->cur_tx,
	    sizeof(struct fec_buf_desc),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);


	/* tx descriptors are ready */
	HWRITE4(sc, ENET_TDAR, ENET_TDAR_TDAR);

	/* Advance, wrapping at the slot marked ENET_TXD_WRAP. */
	if (sc->tx_desc_base[sc->cur_tx].status & ENET_TXD_WRAP)
		sc->cur_tx = 0;
	else
		sc->cur_tx++;

	return 0;
}
877 
878 struct mbuf *
879 fec_newbuf(void)
880 {
881 	struct mbuf *m;
882 
883 	MGETHDR(m, M_DONTWAIT, MT_DATA);
884 	if (m == NULL)
885 		return (NULL);
886 
887 	MCLGET(m, M_DONTWAIT);
888 	if (!(m->m_flags & M_EXT)) {
889 		m_freem(m);
890 		return (NULL);
891 	}
892 
893 	return (m);
894 }
895 
896 /*
897  * Established by attachment driver at interrupt priority IPL_NET.
898  */
899 int
900 fec_intr(void *arg)
901 {
902 	struct fec_softc *sc = arg;
903 	struct ifnet *ifp = &sc->sc_ac.ac_if;
904 	u_int32_t status;
905 
906 	/* Find out which interrupts are pending. */
907 	status = HREAD4(sc, ENET_EIR);
908 
909 	/* Acknowledge the interrupts we are about to handle. */
910 	HWRITE4(sc, ENET_EIR, status);
911 
912 	/*
913 	 * Handle incoming packets.
914 	 */
915 	if (ISSET(status, ENET_EIR_RXF)) {
916 		if (ifp->if_flags & IFF_RUNNING)
917 			fec_recv(sc);
918 	}
919 
920 	/* Try to transmit. */
921 	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
922 		fec_start(ifp);
923 
924 	return 1;
925 }
926 
/*
 * Drain the rx ring: copy each filled descriptor's payload into a
 * fresh mbuf cluster, return the descriptor to the hardware and pass
 * the collected packets up the stack.  Runs at interrupt level.
 */
void
fec_recv(struct fec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();

	bus_dmamap_sync(sc->rbdma.dma_tag, sc->rbdma.dma_map,
	    0, sc->rbdma.dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
	    0, sc->rxdma.dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Process slots until one is still owned by the hardware. */
	while (!(sc->rx_desc_base[sc->cur_rx].status & ENET_RXD_EMPTY))
	{
		struct mbuf *m;
		m = fec_newbuf();

		/* Out of mbufs: leave the ring as-is and retry later. */
		if (m == NULL) {
			ifp->if_ierrors++;
			goto done;
		}

		m->m_pkthdr.len = m->m_len = sc->rx_desc_base[sc->cur_rx].data_length;
		/*
		 * NOTE(review): m_adj() trims ETHER_ALIGN bytes after the
		 * lengths were set, so m_len ends up data_length - 2, yet
		 * the memcpy below still copies the full data_length.  The
		 * copy fits inside the cluster, but confirm the alignment
		 * intent.
		 */
		m_adj(m, ETHER_ALIGN);

		memcpy(mtod(m, char *), sc->rx_buffer_base[sc->cur_rx].data,
		    sc->rx_desc_base[sc->cur_rx].data_length);

		/* Hand the slot back to the hardware. */
		sc->rx_desc_base[sc->cur_rx].status |= ENET_RXD_EMPTY;
		sc->rx_desc_base[sc->cur_rx].data_length = 0;

		bus_dmamap_sync(sc->rbdma.dma_tag, sc->rbdma.dma_map,
		    ENET_MAX_PKT_SIZE * sc->cur_rx, ENET_MAX_PKT_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
		    sizeof(struct fec_buf_desc) * sc->cur_rx,
		    sizeof(struct fec_buf_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Advance, wrapping at the slot marked ENET_RXD_WRAP. */
		if (sc->rx_desc_base[sc->cur_rx].status & ENET_RXD_WRAP)
			sc->cur_rx = 0;
		else
			sc->cur_rx++;

		ml_enqueue(&ml, m);
	}

done:
	/* rx descriptors are ready */
	HWRITE4(sc, ENET_RDAR, ENET_RDAR_RDAR);

	if_input(ifp, &ml);
}
983 
984 void
985 fec_tick(void *arg)
986 {
987 	struct fec_softc *sc = arg;
988 	int s;
989 
990 	s = splnet();
991 	mii_tick(&sc->sc_mii);
992 	splx(s);
993 
994 	timeout_add_sec(&sc->sc_tick, 1);
995 }
996 
997 /*
998  * MII
999  * Interrupts need ENET_ECR_ETHEREN to be set,
1000  * so we just read the interrupt status registers.
1001  */
1002 int
1003 fec_miibus_readreg(struct device *dev, int phy, int reg)
1004 {
1005 	int r = 0;
1006 	struct fec_softc *sc = (struct fec_softc *)dev;
1007 
1008 	HSET4(sc, ENET_EIR, ENET_EIR_MII);
1009 
1010 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ENET_MMFR,
1011 	    ENET_MMFR_ST | ENET_MMFR_OP_RD | ENET_MMFR_TA |
1012 	    phy << ENET_MMFR_PA_SHIFT | reg << ENET_MMFR_RA_SHIFT);
1013 
1014 	while(!(HREAD4(sc, ENET_EIR) & ENET_EIR_MII));
1015 
1016 	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ENET_MMFR);
1017 
1018 	return (r & 0xffff);
1019 }
1020 
1021 void
1022 fec_miibus_writereg(struct device *dev, int phy, int reg, int val)
1023 {
1024 	struct fec_softc *sc = (struct fec_softc *)dev;
1025 
1026 	HSET4(sc, ENET_EIR, ENET_EIR_MII);
1027 
1028 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ENET_MMFR,
1029 	    ENET_MMFR_ST | ENET_MMFR_OP_WR | ENET_MMFR_TA |
1030 	    phy << ENET_MMFR_PA_SHIFT | reg << ENET_MMFR_RA_SHIFT |
1031 	    (val & 0xffff));
1032 
1033 	while(!(HREAD4(sc, ENET_EIR) & ENET_EIR_MII));
1034 
1035 	return;
1036 }
1037 
1038 void
1039 fec_miibus_statchg(struct device *dev)
1040 {
1041 	struct fec_softc *sc = (struct fec_softc *)dev;
1042 	uint32_t ecr, rcr;
1043 
1044 	ecr = HREAD4(sc, ENET_ECR) & ~ENET_ECR_SPEED;
1045 	rcr = HREAD4(sc, ENET_RCR) & ~ENET_RCR_RMII_10T;
1046 	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
1047 	case IFM_1000_T:  /* Gigabit */
1048 		ecr |= ENET_ECR_SPEED;
1049 		break;
1050 	case IFM_100_TX:
1051 		break;
1052 	case IFM_10_T:
1053 		rcr |= ENET_RCR_RMII_10T;
1054 		break;
1055 	}
1056 	HWRITE4(sc, ENET_ECR, ecr);
1057 	HWRITE4(sc, ENET_RCR, rcr);
1058 
1059 	return;
1060 }
1061 
1062 int
1063 fec_ifmedia_upd(struct ifnet *ifp)
1064 {
1065 	struct fec_softc *sc = ifp->if_softc;
1066 	struct mii_data *mii = &sc->sc_mii;
1067 	int err;
1068 	if (mii->mii_instance) {
1069 		struct mii_softc *miisc;
1070 
1071 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1072 			mii_phy_reset(miisc);
1073 	}
1074 	err = mii_mediachg(mii);
1075 	return (err);
1076 }
1077 
1078 void
1079 fec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1080 {
1081 	struct fec_softc *sc = ifp->if_softc;
1082 	struct mii_data *mii = &sc->sc_mii;
1083 
1084 	mii_pollstat(mii);
1085 
1086 	ifmr->ifm_active = mii->mii_media_active;
1087 	ifmr->ifm_status = mii->mii_media_status;
1088 }
1089 
1090 /*
1091  * Manage DMA'able memory.
1092  */
1093 int
1094 fec_dma_malloc(struct fec_softc *sc, bus_size_t size,
1095     struct fec_dma_alloc *dma)
1096 {
1097 	int r;
1098 
1099 	dma->dma_tag = sc->sc_dma_tag;
1100 	r = bus_dmamem_alloc(dma->dma_tag, size, ENET_ALIGNMENT, 0, &dma->dma_seg,
1101 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1102 	if (r != 0) {
1103 		printf("%s: fec_dma_malloc: bus_dmammem_alloc failed; "
1104 			"size %lu, error %d\n", sc->sc_dev.dv_xname,
1105 			(unsigned long)size, r);
1106 		goto fail_0;
1107 	}
1108 
1109 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1110 	    &dma->dma_vaddr, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1111 	if (r != 0) {
1112 		printf("%s: fec_dma_malloc: bus_dmammem_map failed; "
1113 			"size %lu, error %d\n", sc->sc_dev.dv_xname,
1114 			(unsigned long)size, r);
1115 		goto fail_1;
1116 	}
1117 
1118 	r = bus_dmamap_create(dma->dma_tag, size, 1,
1119 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1120 	if (r != 0) {
1121 		printf("%s: fec_dma_malloc: bus_dmamap_create failed; "
1122 			"error %u\n", sc->sc_dev.dv_xname, r);
1123 		goto fail_2;
1124 	}
1125 
1126 	r = bus_dmamap_load(dma->dma_tag, dma->dma_map,
1127 			    dma->dma_vaddr, size, NULL,
1128 			    BUS_DMA_NOWAIT);
1129 	if (r != 0) {
1130 		printf("%s: fec_dma_malloc: bus_dmamap_load failed; "
1131 			"error %u\n", sc->sc_dev.dv_xname, r);
1132 		goto fail_3;
1133 	}
1134 
1135 	dma->dma_size = size;
1136 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
1137 	return (0);
1138 
1139 fail_3:
1140 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1141 fail_2:
1142 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1143 fail_1:
1144 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1145 fail_0:
1146 	dma->dma_map = NULL;
1147 	dma->dma_tag = NULL;
1148 
1149 	return (r);
1150 }
1151 
/*
 * Release everything fec_dma_malloc() acquired.  Safe to call on an
 * allocation that failed, since fec_dma_malloc() NULLs dma_tag and
 * dma_map on its error paths.
 */
void
fec_dma_free(struct fec_softc *sc, struct fec_dma_alloc *dma)
{
	if (dma->dma_tag == NULL)
		return;

	if (dma->dma_map != NULL) {
		/* Complete any outstanding DMA before tearing down. */
		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
		    dma->dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	}
	dma->dma_tag = NULL;
}
1169