xref: /openbsd-src/sys/dev/fdt/if_fec.c (revision d1df930ffab53da22f3324c32bed7ac5709915e6)
1 /* $OpenBSD: if_fec.c,v 1.6 2018/08/06 10:52:30 patrick Exp $ */
2 /*
3  * Copyright (c) 2012-2013 Patrick Wildt <patrick@blueri.se>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <sys/param.h>
19 #include <sys/systm.h>
20 #include <sys/sockio.h>
21 #include <sys/queue.h>
22 #include <sys/malloc.h>
23 #include <sys/device.h>
24 #include <sys/evcount.h>
25 #include <sys/socket.h>
26 #include <sys/timeout.h>
27 #include <sys/mbuf.h>
28 #include <machine/intr.h>
29 #include <machine/bus.h>
30 #include <machine/fdt.h>
31 
32 #include "bpfilter.h"
33 
34 #include <net/if.h>
35 #include <net/if_media.h>
36 #if NBPFILTER > 0
37 #include <net/bpf.h>
38 #endif
39 
40 #include <netinet/in.h>
41 #include <netinet/if_ether.h>
42 
43 #include <dev/mii/mii.h>
44 #include <dev/mii/miivar.h>
45 #include <dev/mii/miidevs.h>
46 
47 #include <dev/ofw/openfirm.h>
48 #include <dev/ofw/ofw_clock.h>
49 #include <dev/ofw/ofw_gpio.h>
50 #include <dev/ofw/ofw_pinctrl.h>
51 #include <dev/ofw/fdt.h>
52 
/* configuration registers */
#define ENET_EIR		0x004	/* interrupt event (write-1-to-clear) */
#define ENET_EIMR		0x008	/* interrupt mask */
#define ENET_RDAR		0x010	/* rx descriptor active */
#define ENET_TDAR		0x014	/* tx descriptor active */
#define ENET_ECR		0x024	/* ethernet control */
#define ENET_MMFR		0x040	/* MII management frame */
#define ENET_MSCR		0x044	/* MII speed control */
#define ENET_MIBC		0x064	/* MIB control */
#define ENET_RCR		0x084	/* receive control */
#define ENET_TCR		0x0C4	/* transmit control */
#define ENET_PALR		0x0E4	/* MAC address, lower 4 bytes */
#define ENET_PAUR		0x0E8	/* MAC address, upper 2 bytes */
#define ENET_OPD		0x0EC	/* opcode/pause duration */
#define ENET_IAUR		0x118	/* individual hash, upper 32 bits */
#define ENET_IALR		0x11C	/* individual hash, lower 32 bits */
#define ENET_GAUR		0x120	/* group (multicast) hash, upper */
#define ENET_GALR		0x124	/* group (multicast) hash, lower */
#define ENET_TFWR		0x144	/* tx FIFO watermark */
#define ENET_RDSR		0x180	/* rx descriptor ring base */
#define ENET_TDSR		0x184	/* tx descriptor ring base */
#define ENET_MRBR		0x188	/* max receive buffer size */
#define ENET_RSFL		0x190	/* rx FIFO section full */
#define ENET_RSEM		0x194	/* rx FIFO section empty */
#define ENET_RAEM		0x198	/* rx FIFO almost empty */
#define ENET_RAFL		0x19C	/* rx FIFO almost full */
#define ENET_TSEM		0x1A0	/* tx FIFO section empty */
#define ENET_TAEM		0x1A4	/* tx FIFO almost empty */
#define ENET_TAFL		0x1A8	/* tx FIFO almost full */
#define ENET_TIPG		0x1AC	/* tx inter-packet gap */
#define ENET_FTRL		0x1B0	/* frame truncation length */
#define ENET_TACC		0x1C0	/* tx accelerator control */
#define ENET_RACC		0x1C4	/* rx accelerator control */

#define ENET_RDAR_RDAR		(1 << 24)	/* rx ring has empty BDs */
#define ENET_TDAR_TDAR		(1 << 24)	/* tx ring has ready BDs */
#define ENET_ECR_RESET		(1 << 0)	/* soft reset */
#define ENET_ECR_ETHEREN	(1 << 1)	/* MAC enable */
#define ENET_ECR_EN1588		(1 << 4)	/* enhanced (1588) descriptors */
#define ENET_ECR_SPEED		(1 << 5)	/* 1000 Mbit/s mode */
#define ENET_ECR_DBSWP		(1 << 8)	/* byte-swap descriptors (LE) */
#define ENET_MMFR_TA		(2 << 16)	/* MII frame turnaround */
#define ENET_MMFR_RA_SHIFT	18		/* register address */
#define ENET_MMFR_PA_SHIFT	23		/* PHY address */
#define ENET_MMFR_OP_WR		(1 << 28)
#define ENET_MMFR_OP_RD		(2 << 28)
#define ENET_MMFR_ST		(1 << 30)	/* start-of-frame (clause 22) */
#define ENET_RCR_MII_MODE	(1 << 2)
#define ENET_RCR_PROM		(1 << 3)	/* promiscuous */
#define ENET_RCR_FCE		(1 << 5)	/* flow control enable */
#define ENET_RCR_RGMII_MODE	(1 << 6)
#define ENET_RCR_RMII_10T	(1 << 9)	/* 10 Mbit/s in R(G)MII mode */
#define ENET_RCR_MAX_FL(x)	(((x) & 0x3fff) << 16)	/* max frame length */
#define ENET_TCR_FDEN		(1 << 2)	/* full duplex */
#define ENET_EIR_MII		(1 << 23)	/* MII frame complete */
#define ENET_EIR_RXF		(1 << 25)	/* rx frame */
#define ENET_EIR_TXF		(1 << 27)	/* tx frame */
#define ENET_TFWR_STRFWD	(1 << 8)	/* store and forward */

/* statistics counters */

/* 1588 control */
#define ENET_ATCR		0x400
#define ENET_ATVR		0x404
#define ENET_ATOFF		0x408
#define ENET_ATPER		0x40C
#define ENET_ATCOR		0x410
#define ENET_ATINC		0x414
#define ENET_ATSTMP		0x418

/* capture / compare block */
#define ENET_TGSR		0x604
#define ENET_TCSR0		0x608
#define ENET_TCCR0		0x60C
#define ENET_TCSR1		0x610
#define ENET_TCCR1		0x614
#define ENET_TCSR2		0x618
#define ENET_TCCR2		0x61C
#define ENET_TCSR3		0x620
#define ENET_TCCR3		0x624

#define ENET_MII_CLK		2500000	/* target MDC frequency, Hz */
#define ENET_ALIGNMENT		16	/* DMA buffer alignment */

/* register access helpers */
#define HREAD4(sc, reg)							\
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define HSET4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
#define HCLR4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))

/* what should we use? */
#define ENET_MAX_TXD		32
#define ENET_MAX_RXD		32

#define ENET_MAX_PKT_SIZE	1536

/* round size up to the next multiple of unit (unit must be a power of 2) */
#define ENET_ROUNDUP(size, unit) (((size) + (unit) - 1) & ~((unit) - 1))

/* buffer descriptor status bits */
#define ENET_RXD_EMPTY		(1 << 15)	/* BD owned by hardware */
#define ENET_RXD_WRAP		(1 << 13)	/* last BD in the ring */
#define ENET_RXD_LAST		(1 << 11)	/* last BD of the frame */
#define ENET_RXD_MISS		(1 << 8)
#define ENET_RXD_BC		(1 << 7)	/* broadcast */
#define ENET_RXD_MC		(1 << 6)	/* multicast */
#define ENET_RXD_LG		(1 << 5)	/* frame too long */
#define ENET_RXD_NO		(1 << 4)	/* non-octet aligned */
#define ENET_RXD_CR		(1 << 2)	/* CRC error */
#define ENET_RXD_OV		(1 << 1)	/* FIFO overrun */
#define ENET_RXD_TR		(1 << 0)	/* frame truncated */

#define ENET_TXD_READY		(1 << 15)	/* BD owned by hardware */
#define ENET_TXD_WRAP		(1 << 13)	/* last BD in the ring */
#define ENET_TXD_LAST		(1 << 11)	/* last BD of the frame */
#define ENET_TXD_TC		(1 << 10)	/* transmit CRC */
#define ENET_TXD_ABC		(1 << 9)
#define ENET_TXD_STATUS_MASK	0x3ff

#ifdef ENET_ENHANCED_BD
/* enhanced */
#define ENET_RXD_INT		(1 << 23)

#define ENET_TXD_INT		(1 << 30)
#endif
180 
181 /*
182  * Bus dma allocation structure used by
183  * fec_dma_malloc and fec_dma_free.
184  */
struct fec_dma_alloc {
	bus_addr_t		dma_paddr;	/* device (bus) address */
	caddr_t			dma_vaddr;	/* kernel virtual address */
	bus_dma_tag_t		dma_tag;	/* tag used for the allocation */
	bus_dmamap_t		dma_map;	/* map the memory is loaded into */
	bus_dma_segment_t	dma_seg;	/* backing segment */
	bus_size_t		dma_size;	/* size of the allocation */
	int			dma_nseg;	/* number of segments in use */
};
194 
/*
 * Buffer descriptor shared with the ENET DMA engine (its physical
 * address is programmed into ENET_TDSR/ENET_RDSR) — field order and
 * sizes are fixed by the hardware, do not reorder.
 */
struct fec_buf_desc {
	uint16_t data_length;		/* payload's length in bytes */
	uint16_t status;		/* BD's status (see datasheet) */
	uint32_t data_pointer;		/* payload's buffer address */
#ifdef ENET_ENHANCED_BD
	uint32_t enhanced_status;	/* enhanced status with IEEE 1588 */
	uint32_t reserved0;		/* reserved */
	uint32_t update_done;		/* buffer descriptor update done */
	uint32_t timestamp;		/* IEEE 1588 timestamp */
	uint32_t reserved1[2];		/* reserved */
#endif
};
207 
/* one fixed-size packet buffer; rings are arrays of these */
struct fec_buffer {
	uint8_t data[ENET_MAX_PKT_SIZE];
};
211 
/* per-device driver state */
struct fec_softc {
	struct device		sc_dev;
	struct arpcom		sc_ac;		/* ethernet common data */
	struct mii_data		sc_mii;		/* MII bus state */
	int			sc_node;	/* device tree node */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	void			*sc_ih[3]; /* Interrupt handler */
	bus_dma_tag_t		sc_dma_tag;
	struct fec_dma_alloc	txdma;		/* bus_dma glue for tx desc */
	struct fec_buf_desc	*tx_desc_base;
	struct fec_dma_alloc	rxdma;		/* bus_dma glue for rx desc */
	struct fec_buf_desc	*rx_desc_base;
	struct fec_dma_alloc	tbdma;		/* bus_dma glue for packets */
	struct fec_buffer	*tx_buffer_base;
	struct fec_dma_alloc	rbdma;		/* bus_dma glue for packets */
	struct fec_buffer	*rx_buffer_base;
	int			cur_tx;		/* next tx descriptor to use */
	int			cur_rx;		/* next rx descriptor to poll */
	struct timeout		sc_tick;	/* periodic mii_tick timeout */
	uint32_t		sc_phy_speed;	/* MSCR divider for MDC clock */
};
234 
/* global softc pointer, set by fec_attach() */
struct fec_softc *fec_sc;

int fec_match(struct device *, void *, void *);
void fec_attach(struct device *, struct device *, void *);
void fec_phy_init(struct fec_softc *, struct mii_softc *);
int fec_ioctl(struct ifnet *, u_long, caddr_t);
void fec_start(struct ifnet *);
int fec_encap(struct fec_softc *, struct mbuf *);
void fec_init_txd(struct fec_softc *);
void fec_init_rxd(struct fec_softc *);
void fec_init(struct fec_softc *);
void fec_stop(struct fec_softc *);
void fec_iff(struct fec_softc *);
struct mbuf * fec_newbuf(void);
int fec_intr(void *);
void fec_recv(struct fec_softc *);
void fec_tick(void *);
int fec_miibus_readreg(struct device *, int, int);
void fec_miibus_writereg(struct device *, int, int, int);
void fec_miibus_statchg(struct device *);
int fec_ifmedia_upd(struct ifnet *);
void fec_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int fec_dma_malloc(struct fec_softc *, bus_size_t, struct fec_dma_alloc *);
void fec_dma_free(struct fec_softc *, struct fec_dma_alloc *);

struct cfattach fec_ca = {
	sizeof (struct fec_softc), fec_match, fec_attach
};

struct cfdriver fec_cd = {
	NULL, "fec", DV_IFNET
};
267 
268 int
269 fec_match(struct device *parent, void *match, void *aux)
270 {
271 	struct fdt_attach_args *faa = aux;
272 
273 	return (OF_is_compatible(faa->fa_node, "fsl,imx6q-fec") ||
274 	    OF_is_compatible(faa->fa_node, "fsl,imx6sx-fec") ||
275 	    OF_is_compatible(faa->fa_node, "fsl,imx8mq-fec"));
276 }
277 
/*
 * Attach: map registers, power up the block, optionally reset the PHY
 * via GPIO, allocate the four DMA regions (tx/rx descriptor rings and
 * tx/rx packet buffers), configure the MDIO clock and attach MII/ifnet.
 */
void
fec_attach(struct device *parent, struct device *self, void *aux)
{
	struct fec_softc *sc = (struct fec_softc *) self;
	struct fdt_attach_args *faa = aux;
	struct mii_data *mii;
	struct mii_softc *child;
	struct ifnet *ifp;
	int tsize, rsize, tbsize, rbsize, s;
	uint32_t phy_reset_gpio[3];
	uint32_t phy_reset_duration;

	if (faa->fa_nreg < 1)
		return;

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("fec_attach: bus_space_map failed!");

	sc->sc_dma_tag = faa->fa_dmat;

	/* apply the default pinmux for this node */
	pinctrl_byname(faa->fa_node, "default");

	/* power it up */
	clock_enable_all(faa->fa_node);

	/* reset PHY */
	if (OF_getpropintarray(faa->fa_node, "phy-reset-gpios", phy_reset_gpio,
	    sizeof(phy_reset_gpio)) == sizeof(phy_reset_gpio)) {
		/* reset duration in ms; clamp absurd values to 1 ms */
		phy_reset_duration = OF_getpropint(faa->fa_node,
		    "phy-reset-duration", 1);
		if (phy_reset_duration > 1000)
			phy_reset_duration = 1;

		/*
		 * The Linux people really screwed the pooch here.
		 * The Linux kernel always treats the gpio as
		 * active-low, even if it is marked as active-high in
		 * the device tree.  As a result the device tree for
		 * many boards incorrectly marks the gpio as
		 * active-high.
		 */
		phy_reset_gpio[2] = GPIO_ACTIVE_LOW;
		gpio_controller_config_pin(phy_reset_gpio, GPIO_CONFIG_OUTPUT);

		/*
		 * On some Cubox-i machines we need to hold the PHY in
		 * reset a little bit longer than specified.
		 */
		gpio_controller_set_pin(phy_reset_gpio, 1);
		delay((phy_reset_duration + 1) * 1000);
		gpio_controller_set_pin(phy_reset_gpio, 0);
		delay(1000);
	}
	printf("\n");

	/* Figure out the hardware address. Must happen before reset. */
	OF_getprop(faa->fa_node, "local-mac-address", sc->sc_ac.ac_enaddr,
	    sizeof(sc->sc_ac.ac_enaddr));

	/* reset the controller */
	HSET4(sc, ENET_ECR, ENET_ECR_RESET);
	while (HREAD4(sc, ENET_ECR) & ENET_ECR_ETHEREN)
		continue;

	/* mask and ack all interrupts */
	HWRITE4(sc, ENET_EIMR, 0);
	HWRITE4(sc, ENET_EIR, 0xffffffff);

	sc->sc_ih[0] = fdt_intr_establish_idx(faa->fa_node, 0, IPL_NET,
	    fec_intr, sc, sc->sc_dev.dv_xname);
	sc->sc_ih[1] = fdt_intr_establish_idx(faa->fa_node, 1, IPL_NET,
	    fec_intr, sc, sc->sc_dev.dv_xname);
	sc->sc_ih[2] = fdt_intr_establish_idx(faa->fa_node, 2, IPL_NET,
	    fec_intr, sc, sc->sc_dev.dv_xname);

	/* tx descriptor ring */
	tsize = ENET_MAX_TXD * sizeof(struct fec_buf_desc);
	tsize = ENET_ROUNDUP(tsize, PAGE_SIZE);

	if (fec_dma_malloc(sc, tsize, &sc->txdma)) {
		printf("%s: Unable to allocate tx_desc memory\n",
		    sc->sc_dev.dv_xname);
		goto bad;
	}
	sc->tx_desc_base = (struct fec_buf_desc *)sc->txdma.dma_vaddr;

	/* rx descriptor ring */
	rsize = ENET_MAX_RXD * sizeof(struct fec_buf_desc);
	rsize = ENET_ROUNDUP(rsize, PAGE_SIZE);

	if (fec_dma_malloc(sc, rsize, &sc->rxdma)) {
		printf("%s: Unable to allocate rx_desc memory\n",
		    sc->sc_dev.dv_xname);
		goto txdma;
	}
	sc->rx_desc_base = (struct fec_buf_desc *)sc->rxdma.dma_vaddr;

	/* tx packet buffers, one per descriptor */
	tbsize = ENET_MAX_TXD * ENET_MAX_PKT_SIZE;
	tbsize = ENET_ROUNDUP(tbsize, PAGE_SIZE);

	if (fec_dma_malloc(sc, tbsize, &sc->tbdma)) {
		printf("%s: Unable to allocate tx_buffer memory\n",
		    sc->sc_dev.dv_xname);
		goto rxdma;
	}
	sc->tx_buffer_base = (struct fec_buffer *)sc->tbdma.dma_vaddr;

	/* rx packet buffers, one per descriptor */
	rbsize = ENET_MAX_RXD * ENET_MAX_PKT_SIZE;
	rbsize = ENET_ROUNDUP(rbsize, PAGE_SIZE);

	if (fec_dma_malloc(sc, rbsize, &sc->rbdma)) {
		printf("%s: Unable to allocate rx_buffer memory\n",
		    sc->sc_dev.dv_xname);
		goto tbdma;
	}
	sc->rx_buffer_base = (struct fec_buffer *)sc->rbdma.dma_vaddr;

	sc->cur_tx = 0;
	sc->cur_rx = 0;

	s = splnet();

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = fec_ioctl;
	ifp->if_start = fec_start;
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf("%s: address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/*
	 * Initialize the MII clock.  The formula is:
	 *
	 * ENET_MII_CLK = ref_freq / ((phy_speed + 1) x 2)
	 * phy_speed = (((ref_freq / ENET_MII_CLK) / 2) - 1)
	 */
	sc->sc_phy_speed = clock_get_frequency(sc->sc_node, "ipg");
	sc->sc_phy_speed = (sc->sc_phy_speed + (ENET_MII_CLK - 1)) / ENET_MII_CLK;
	sc->sc_phy_speed = (sc->sc_phy_speed / 2) - 1;
	HWRITE4(sc, ENET_MSCR, (sc->sc_phy_speed << 1) | 0x100);

	/* Initialize MII/media info. */
	mii = &sc->sc_mii;
	mii->mii_ifp = ifp;
	mii->mii_readreg = fec_miibus_readreg;
	mii->mii_writereg = fec_miibus_writereg;
	mii->mii_statchg = fec_miibus_statchg;

	ifmedia_init(&mii->mii_media, 0, fec_ifmedia_upd, fec_ifmedia_sts);
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	/* apply PHY-specific fixups to the first PHY found */
	child = LIST_FIRST(&mii->mii_phys);
	if (child)
		fec_phy_init(sc, child);

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);
	splx(s);

	timeout_set(&sc->sc_tick, fec_tick, sc);

	fec_sc = sc;
	return;

tbdma:
	fec_dma_free(sc, &sc->tbdma);
rxdma:
	fec_dma_free(sc, &sc->rxdma);
txdma:
	fec_dma_free(sc, &sc->txdma);
bad:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, faa->fa_reg[0].size);
}
460 
461 void
462 fec_phy_init(struct fec_softc *sc, struct mii_softc *child)
463 {
464 	struct device *dev = (struct device *)sc;
465 	int phy = child->mii_phy;
466 	uint32_t reg;
467 
468 	if (child->mii_oui == MII_OUI_ATHEROS &&
469 	    child->mii_model == MII_MODEL_ATHEROS_AR8035) {
470 		/* disable SmartEEE */
471 		fec_miibus_writereg(dev, phy, 0x0d, 0x0003);
472 		fec_miibus_writereg(dev, phy, 0x0e, 0x805d);
473 		fec_miibus_writereg(dev, phy, 0x0d, 0x4003);
474 		reg = fec_miibus_readreg(dev, phy, 0x0e);
475 		fec_miibus_writereg(dev, phy, 0x0e, reg & ~0x0100);
476 
477 		/* enable 125MHz clk output */
478 		fec_miibus_writereg(dev, phy, 0x0d, 0x0007);
479 		fec_miibus_writereg(dev, phy, 0x0e, 0x8016);
480 		fec_miibus_writereg(dev, phy, 0x0d, 0x4007);
481 
482 		reg = fec_miibus_readreg(dev, phy, 0x0e) & 0xffe3;
483 		fec_miibus_writereg(dev, phy, 0x0e, reg | 0x18);
484 
485 		/* tx clock delay */
486 		fec_miibus_writereg(dev, phy, 0x1d, 0x0005);
487 		reg = fec_miibus_readreg(dev, phy, 0x1e);
488 		fec_miibus_writereg(dev, phy, 0x1e, reg | 0x0100);
489 
490 		PHY_RESET(child);
491 	}
492 
493 	if (child->mii_oui == MII_OUI_MICREL &&
494 	    child->mii_model == MII_MODEL_MICREL_KSZ9021) {
495 		uint32_t rxc, rxdv, txc, txen;
496 		uint32_t rxd0, rxd1, rxd2, rxd3;
497 		uint32_t txd0, txd1, txd2, txd3;
498 		uint32_t val;
499 
500 		rxc = OF_getpropint(sc->sc_node, "rxc-skew-ps", 1400) / 200;
501 		rxdv = OF_getpropint(sc->sc_node, "rxdv-skew-ps", 1400) / 200;
502 		txc = OF_getpropint(sc->sc_node, "txc-skew-ps", 1400) / 200;
503 		txen = OF_getpropint(sc->sc_node, "txen-skew-ps", 1400) / 200;
504 		rxd0 = OF_getpropint(sc->sc_node, "rxd0-skew-ps", 1400) / 200;
505 		rxd1 = OF_getpropint(sc->sc_node, "rxd1-skew-ps", 1400) / 200;
506 		rxd2 = OF_getpropint(sc->sc_node, "rxd2-skew-ps", 1400) / 200;
507 		rxd3 = OF_getpropint(sc->sc_node, "rxd3-skew-ps", 1400) / 200;
508 		txd0 = OF_getpropint(sc->sc_node, "txd0-skew-ps", 1400) / 200;
509 		txd1 = OF_getpropint(sc->sc_node, "txd1-skew-ps", 1400) / 200;
510 		txd2 = OF_getpropint(sc->sc_node, "txd2-skew-ps", 1400) / 200;
511 		txd3 = OF_getpropint(sc->sc_node, "txd3-skew-ps", 1400) / 200;
512 
513 		val = ((rxc & 0xf) << 12) | ((rxdv & 0xf) << 8) |
514 		    ((txc & 0xf) << 4) | ((txen & 0xf) << 0);
515 		fec_miibus_writereg(dev, phy, 0x0b, 0x8104);
516 		fec_miibus_writereg(dev, phy, 0x0c, val);
517 
518 		val = ((rxd3 & 0xf) << 12) | ((rxd2 & 0xf) << 8) |
519 		    ((rxd1 & 0xf) << 4) | ((rxd0 & 0xf) << 0);
520 		fec_miibus_writereg(dev, phy, 0x0b, 0x8105);
521 		fec_miibus_writereg(dev, phy, 0x0c, val);
522 
523 		val = ((txd3 & 0xf) << 12) | ((txd2 & 0xf) << 8) |
524 		    ((txd1 & 0xf) << 4) | ((txd0 & 0xf) << 0);
525 		fec_miibus_writereg(dev, phy, 0x0b, 0x8106);
526 		fec_miibus_writereg(dev, phy, 0x0c, val);
527 	}
528 
529 	if (child->mii_oui == MII_OUI_MICREL &&
530 	    child->mii_model == MII_MODEL_MICREL_KSZ9031) {
531 		uint32_t rxc, rxdv, txc, txen;
532 		uint32_t rxd0, rxd1, rxd2, rxd3;
533 		uint32_t txd0, txd1, txd2, txd3;
534 		uint32_t val;
535 
536 		rxc = OF_getpropint(sc->sc_node, "rxc-skew-ps", 900) / 60;
537 		rxdv = OF_getpropint(sc->sc_node, "rxdv-skew-ps", 420) / 60;
538 		txc = OF_getpropint(sc->sc_node, "txc-skew-ps", 900) / 60;
539 		txen = OF_getpropint(sc->sc_node, "txen-skew-ps", 420) / 60;
540 		rxd0 = OF_getpropint(sc->sc_node, "rxd0-skew-ps", 420) / 60;
541 		rxd1 = OF_getpropint(sc->sc_node, "rxd1-skew-ps", 420) / 60;
542 		rxd2 = OF_getpropint(sc->sc_node, "rxd2-skew-ps", 420) / 60;
543 		rxd3 = OF_getpropint(sc->sc_node, "rxd3-skew-ps", 420) / 60;
544 		txd0 = OF_getpropint(sc->sc_node, "txd0-skew-ps", 420) / 60;
545 		txd1 = OF_getpropint(sc->sc_node, "txd1-skew-ps", 420) / 60;
546 		txd2 = OF_getpropint(sc->sc_node, "txd2-skew-ps", 420) / 60;
547 		txd3 = OF_getpropint(sc->sc_node, "txd3-skew-ps", 420) / 60;
548 
549 		val = ((rxdv & 0xf) << 4) || ((txen & 0xf) << 0);
550 		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
551 		fec_miibus_writereg(dev, phy, 0x0e, 0x0004);
552 		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
553 		fec_miibus_writereg(dev, phy, 0x0e, val);
554 
555 		val = ((rxd3 & 0xf) << 12) | ((rxd2 & 0xf) << 8) |
556 		    ((rxd1 & 0xf) << 4) | ((rxd0 & 0xf) << 0);
557 		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
558 		fec_miibus_writereg(dev, phy, 0x0e, 0x0005);
559 		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
560 		fec_miibus_writereg(dev, phy, 0x0e, val);
561 
562 		val = ((txd3 & 0xf) << 12) | ((txd2 & 0xf) << 8) |
563 		    ((txd1 & 0xf) << 4) | ((txd0 & 0xf) << 0);
564 		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
565 		fec_miibus_writereg(dev, phy, 0x0e, 0x0006);
566 		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
567 		fec_miibus_writereg(dev, phy, 0x0e, val);
568 
569 		val = ((txc & 0x1f) << 5) || ((rxc & 0x1f) << 0);
570 		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
571 		fec_miibus_writereg(dev, phy, 0x0e, 0x0008);
572 		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
573 		fec_miibus_writereg(dev, phy, 0x0e, val);
574 	}
575 }
576 
577 void
578 fec_init_rxd(struct fec_softc *sc)
579 {
580 	int i;
581 
582 	memset(sc->rx_desc_base, 0, ENET_MAX_RXD * sizeof(struct fec_buf_desc));
583 
584 	for (i = 0; i < ENET_MAX_RXD; i++)
585 	{
586 		sc->rx_desc_base[i].status = ENET_RXD_EMPTY;
587 		sc->rx_desc_base[i].data_pointer = sc->rbdma.dma_paddr + i * ENET_MAX_PKT_SIZE;
588 #ifdef ENET_ENHANCED_BD
589 		sc->rx_desc_base[i].enhanced_status = ENET_RXD_INT;
590 #endif
591 	}
592 
593 	sc->rx_desc_base[i - 1].status |= ENET_RXD_WRAP;
594 }
595 
596 void
597 fec_init_txd(struct fec_softc *sc)
598 {
599 	int i;
600 
601 	memset(sc->tx_desc_base, 0, ENET_MAX_TXD * sizeof(struct fec_buf_desc));
602 
603 	for (i = 0; i < ENET_MAX_TXD; i++)
604 	{
605 		sc->tx_desc_base[i].data_pointer = sc->tbdma.dma_paddr + i * ENET_MAX_PKT_SIZE;
606 	}
607 
608 	sc->tx_desc_base[i - 1].status |= ENET_TXD_WRAP;
609 }
610 
/*
 * Bring the controller up: reset it, program MAC address, descriptor
 * rings, RX/TX control, FIFO thresholds, then enable the MAC and
 * interrupts.  The register write order follows the hardware's
 * required init sequence — do not reorder.
 */
void
fec_init(struct fec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int speed = 0;

	/* reset the controller */
	HSET4(sc, ENET_ECR, ENET_ECR_RESET);
	while (HREAD4(sc, ENET_ECR) & ENET_ECR_ETHEREN)
		continue;

	/* set hw address */
	HWRITE4(sc, ENET_PALR,
	    (sc->sc_ac.ac_enaddr[0] << 24) |
	    (sc->sc_ac.ac_enaddr[1] << 16) |
	    (sc->sc_ac.ac_enaddr[2] << 8) |
	     sc->sc_ac.ac_enaddr[3]);
	HWRITE4(sc, ENET_PAUR,
	    (sc->sc_ac.ac_enaddr[4] << 24) |
	    (sc->sc_ac.ac_enaddr[5] << 16));

	/* clear outstanding interrupts */
	HWRITE4(sc, ENET_EIR, 0xffffffff);

	/* set max receive buffer size, 3-0 bits always zero for alignment */
	HWRITE4(sc, ENET_MRBR, ENET_MAX_PKT_SIZE);

	/* set descriptor */
	HWRITE4(sc, ENET_TDSR, sc->txdma.dma_paddr);
	HWRITE4(sc, ENET_RDSR, sc->rxdma.dma_paddr);

	/* init descriptor */
	fec_init_txd(sc);
	fec_init_rxd(sc);

	/* set it to full-duplex */
	HWRITE4(sc, ENET_TCR, ENET_TCR_FDEN);

	/*
	 * Set max frame length to 1518 or 1522 with VLANs,
	 * pause frames and promisc mode.
	 * XXX: RGMII mode - phy dependant
	 */
	HWRITE4(sc, ENET_RCR,
	    ENET_RCR_MAX_FL(1522) | ENET_RCR_RGMII_MODE | ENET_RCR_MII_MODE |
	    ENET_RCR_FCE);

	/* restore the MDC divider, the reset above cleared MSCR */
	HWRITE4(sc, ENET_MSCR, (sc->sc_phy_speed << 1) | 0x100);

	/* RX FIFO treshold and pause */
	HWRITE4(sc, ENET_RSEM, 0x84);
	HWRITE4(sc, ENET_RSFL, 16);
	HWRITE4(sc, ENET_RAEM, 8);
	HWRITE4(sc, ENET_RAFL, 8);
	HWRITE4(sc, ENET_OPD, 0xFFF0);

	/* do store and forward, only i.MX6, needs to be set correctly else */
	HWRITE4(sc, ENET_TFWR, ENET_TFWR_STRFWD);

	/* enable gigabit-ethernet and set it to support little-endian */
	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_T:  /* Gigabit */
		speed |= ENET_ECR_SPEED;
		break;
	default:
		speed &= ~ENET_ECR_SPEED;	/* no-op: speed starts at 0 */
	}
	HWRITE4(sc, ENET_ECR, ENET_ECR_ETHEREN | speed | ENET_ECR_DBSWP);

#ifdef ENET_ENHANCED_BD
	HSET4(sc, ENET_ECR, ENET_ECR_EN1588);
#endif

	/* rx descriptors are ready */
	HWRITE4(sc, ENET_RDAR, ENET_RDAR_RDAR);

	/* program promiscuous mode and multicast filters */
	fec_iff(sc);

	timeout_add_sec(&sc->sc_tick, 1);

	/* Indicate we are up and running. */
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* enable interrupts for tx/rx */
	HWRITE4(sc, ENET_EIMR, ENET_EIR_TXF | ENET_EIR_RXF);

	fec_start(ifp);
}
701 
/*
 * Take the interface down: mark it not-running, stop the tick
 * timeout, then hold the controller in reset.  The MDC divider is
 * re-programmed afterwards since the reset clears MSCR and the MII
 * bus must stay usable while the interface is down.
 */
void
fec_stop(struct fec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_del(&sc->sc_tick);

	/* reset the controller */
	HSET4(sc, ENET_ECR, ENET_ECR_RESET);
	while (HREAD4(sc, ENET_ECR) & ENET_ECR_ETHEREN)
		continue;

	HWRITE4(sc, ENET_MSCR, (sc->sc_phy_speed << 1) | 0x100);
}
723 
724 void
725 fec_iff(struct fec_softc *sc)
726 {
727 	struct arpcom *ac = &sc->sc_ac;
728 	struct ifnet *ifp = &sc->sc_ac.ac_if;
729 	struct ether_multi *enm;
730 	struct ether_multistep step;
731 	uint64_t ghash = 0, ihash = 0;
732 	uint32_t h;
733 
734 	ifp->if_flags &= ~IFF_ALLMULTI;
735 
736 	if (ifp->if_flags & IFF_PROMISC) {
737 		ifp->if_flags |= IFF_ALLMULTI;
738 		ihash = 0xffffffffffffffffLLU;
739 	} else if (ac->ac_multirangecnt > 0) {
740 		ifp->if_flags |= IFF_ALLMULTI;
741 		ghash = 0xffffffffffffffffLLU;
742 	} else {
743 		ETHER_FIRST_MULTI(step, ac, enm);
744 		while (enm != NULL) {
745 			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
746 
747 			ghash |= 1LLU << (((uint8_t *)&h)[3] >> 2);
748 
749 			ETHER_NEXT_MULTI(step, enm);
750 		}
751 	}
752 
753 	HWRITE4(sc, ENET_GAUR, (uint32_t)(ghash >> 32));
754 	HWRITE4(sc, ENET_GALR, (uint32_t)ghash);
755 
756 	HWRITE4(sc, ENET_IAUR, (uint32_t)(ihash >> 32));
757 	HWRITE4(sc, ENET_IALR, (uint32_t)ihash);
758 }
759 
760 int
761 fec_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
762 {
763 	struct fec_softc *sc = ifp->if_softc;
764 	struct ifreq *ifr = (struct ifreq *)data;
765 	int s, error = 0;
766 
767 	s = splnet();
768 
769 	switch (cmd) {
770 	case SIOCSIFADDR:
771 		ifp->if_flags |= IFF_UP;
772 		if (!(ifp->if_flags & IFF_RUNNING))
773 			fec_init(sc);
774 		break;
775 
776 	case SIOCSIFFLAGS:
777 		if (ifp->if_flags & IFF_UP) {
778 			if (ifp->if_flags & IFF_RUNNING)
779 				error = ENETRESET;
780 			else
781 				fec_init(sc);
782 		} else {
783 			if (ifp->if_flags & IFF_RUNNING)
784 				fec_stop(sc);
785 		}
786 		break;
787 
788 	case SIOCGIFMEDIA:
789 	case SIOCSIFMEDIA:
790 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
791 		break;
792 
793 	default:
794 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
795 	}
796 
797 	if (error == ENETRESET) {
798 		if (ifp->if_flags & IFF_RUNNING)
799 			fec_iff(sc);
800 		error = 0;
801 	}
802 
803 	splx(s);
804 	return(error);
805 }
806 
807 void
808 fec_start(struct ifnet *ifp)
809 {
810 	struct fec_softc *sc = ifp->if_softc;
811 	struct mbuf *m_head = NULL;
812 
813 	if (ifq_is_oactive(&ifp->if_snd) || !(ifp->if_flags & IFF_RUNNING))
814 		return;
815 
816 	for (;;) {
817 		m_head = ifq_deq_begin(&ifp->if_snd);
818 		if (m_head == NULL)
819 			break;
820 
821 		if (fec_encap(sc, m_head)) {
822 			ifq_deq_rollback(&ifp->if_snd, m_head);
823 			ifq_set_oactive(&ifp->if_snd);
824 			break;
825 		}
826 
827 		ifq_deq_commit(&ifp->if_snd, m_head);
828 
829 #if NBPFILTER > 0
830 		if (ifp->if_bpf)
831 			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
832 #endif
833 
834 		m_freem(m_head);
835 	}
836 }
837 
/*
 * Copy one packet into the current tx descriptor's buffer and hand
 * the descriptor to the hardware.  Returns 0 on success, EIO if the
 * ring is full or the packet exceeds the fixed buffer size (the
 * caller keeps ownership of the mbuf in both cases).
 */
int
fec_encap(struct fec_softc *sc, struct mbuf *m)
{
	/* READY still set: hardware has not finished this slot yet */
	if (sc->tx_desc_base[sc->cur_tx].status & ENET_TXD_READY) {
		printf("fec: tx queue full!\n");
		return EIO;
	}

	if (m->m_pkthdr.len > ENET_MAX_PKT_SIZE) {
		printf("fec: packet too big\n");
		return EIO;
	}

	/* copy in the actual packet */
	m_copydata(m, 0, m->m_pkthdr.len, (caddr_t)sc->tx_buffer_base[sc->cur_tx].data);

	sc->tx_desc_base[sc->cur_tx].data_length = m->m_pkthdr.len;

	/* keep WRAP, set READY so the hardware owns the descriptor */
	sc->tx_desc_base[sc->cur_tx].status &= ~ENET_TXD_STATUS_MASK;
	sc->tx_desc_base[sc->cur_tx].status |= (ENET_TXD_READY | ENET_TXD_LAST | ENET_TXD_TC);

#ifdef ENET_ENHANCED_BD
	sc->tx_desc_base[sc->cur_tx].enhanced_status = ENET_TXD_INT;
	sc->tx_desc_base[sc->cur_tx].update_done = 0;
#endif

	/*
	 * NOTE(review): these syncs use POSTREAD|POSTWRITE although they
	 * run before handing the buffer/descriptor to the device; the
	 * conventional op here would be PREWRITE — confirm against
	 * bus_dmamap_sync(9) before changing.
	 */
	bus_dmamap_sync(sc->tbdma.dma_tag, sc->tbdma.dma_map,
	    ENET_MAX_PKT_SIZE * sc->cur_tx, ENET_MAX_PKT_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map,
	    sizeof(struct fec_buf_desc) * sc->cur_tx,
	    sizeof(struct fec_buf_desc),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);


	/* tx descriptors are ready */
	HWRITE4(sc, ENET_TDAR, ENET_TDAR_TDAR);

	/* advance the ring index, honoring the WRAP bit */
	if (sc->tx_desc_base[sc->cur_tx].status & ENET_TXD_WRAP)
		sc->cur_tx = 0;
	else
		sc->cur_tx++;

	return 0;
}
884 
885 struct mbuf *
886 fec_newbuf(void)
887 {
888 	struct mbuf *m;
889 
890 	MGETHDR(m, M_DONTWAIT, MT_DATA);
891 	if (m == NULL)
892 		return (NULL);
893 
894 	MCLGET(m, M_DONTWAIT);
895 	if (!(m->m_flags & M_EXT)) {
896 		m_freem(m);
897 		return (NULL);
898 	}
899 
900 	return (m);
901 }
902 
903 /*
904  * Established by attachment driver at interrupt priority IPL_NET.
905  */
906 int
907 fec_intr(void *arg)
908 {
909 	struct fec_softc *sc = arg;
910 	struct ifnet *ifp = &sc->sc_ac.ac_if;
911 	u_int32_t status;
912 
913 	/* Find out which interrupts are pending. */
914 	status = HREAD4(sc, ENET_EIR);
915 
916 	/* Acknowledge the interrupts we are about to handle. */
917 	HWRITE4(sc, ENET_EIR, status);
918 
919 	/*
920 	 * Handle incoming packets.
921 	 */
922 	if (ISSET(status, ENET_EIR_RXF)) {
923 		if (ifp->if_flags & IFF_RUNNING)
924 			fec_recv(sc);
925 	}
926 
927 	/* Try to transmit. */
928 	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
929 		fec_start(ifp);
930 
931 	return 1;
932 }
933 
/*
 * Drain the rx ring: for every descriptor the hardware has filled
 * (EMPTY cleared), copy the frame into a fresh mbuf, return the
 * descriptor to the hardware and queue the mbuf for if_input().
 */
void
fec_recv(struct fec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();

	bus_dmamap_sync(sc->rbdma.dma_tag, sc->rbdma.dma_map,
	    0, sc->rbdma.dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
	    0, sc->rxdma.dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * NOTE(review): descriptor error bits (ENET_RXD_LG/NO/CR/OV/TR)
	 * are never inspected here, so errored frames are passed up —
	 * confirm whether the RCR settings make this acceptable.
	 */
	while (!(sc->rx_desc_base[sc->cur_rx].status & ENET_RXD_EMPTY))
	{
		struct mbuf *m;
		m = fec_newbuf();

		if (m == NULL) {
			ifp->if_ierrors++;
			goto done;
		}

		m->m_pkthdr.len = m->m_len = sc->rx_desc_base[sc->cur_rx].data_length;
		/* drop the 2-byte alignment padding in front of the payload */
		m_adj(m, ETHER_ALIGN);

		memcpy(mtod(m, char *), sc->rx_buffer_base[sc->cur_rx].data,
		    sc->rx_desc_base[sc->cur_rx].data_length);

		/* give the descriptor back to the hardware */
		sc->rx_desc_base[sc->cur_rx].status |= ENET_RXD_EMPTY;
		sc->rx_desc_base[sc->cur_rx].data_length = 0;

		bus_dmamap_sync(sc->rbdma.dma_tag, sc->rbdma.dma_map,
		    ENET_MAX_PKT_SIZE * sc->cur_rx, ENET_MAX_PKT_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
		    sizeof(struct fec_buf_desc) * sc->cur_rx,
		    sizeof(struct fec_buf_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* advance the ring index, honoring the WRAP bit */
		if (sc->rx_desc_base[sc->cur_rx].status & ENET_RXD_WRAP)
			sc->cur_rx = 0;
		else
			sc->cur_rx++;

		ml_enqueue(&ml, m);
	}

done:
	/* rx descriptors are ready */
	HWRITE4(sc, ENET_RDAR, ENET_RDAR_RDAR);

	if_input(ifp, &ml);
}
990 
991 void
992 fec_tick(void *arg)
993 {
994 	struct fec_softc *sc = arg;
995 	int s;
996 
997 	s = splnet();
998 	mii_tick(&sc->sc_mii);
999 	splx(s);
1000 
1001 	timeout_add_sec(&sc->sc_tick, 1);
1002 }
1003 
1004 /*
1005  * MII
1006  * Interrupts need ENET_ECR_ETHEREN to be set,
1007  * so we just read the interrupt status registers.
1008  */
1009 int
1010 fec_miibus_readreg(struct device *dev, int phy, int reg)
1011 {
1012 	int r = 0;
1013 	struct fec_softc *sc = (struct fec_softc *)dev;
1014 
1015 	HSET4(sc, ENET_EIR, ENET_EIR_MII);
1016 
1017 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ENET_MMFR,
1018 	    ENET_MMFR_ST | ENET_MMFR_OP_RD | ENET_MMFR_TA |
1019 	    phy << ENET_MMFR_PA_SHIFT | reg << ENET_MMFR_RA_SHIFT);
1020 
1021 	while(!(HREAD4(sc, ENET_EIR) & ENET_EIR_MII));
1022 
1023 	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ENET_MMFR);
1024 
1025 	return (r & 0xffff);
1026 }
1027 
1028 void
1029 fec_miibus_writereg(struct device *dev, int phy, int reg, int val)
1030 {
1031 	struct fec_softc *sc = (struct fec_softc *)dev;
1032 
1033 	HSET4(sc, ENET_EIR, ENET_EIR_MII);
1034 
1035 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ENET_MMFR,
1036 	    ENET_MMFR_ST | ENET_MMFR_OP_WR | ENET_MMFR_TA |
1037 	    phy << ENET_MMFR_PA_SHIFT | reg << ENET_MMFR_RA_SHIFT |
1038 	    (val & 0xffff));
1039 
1040 	while(!(HREAD4(sc, ENET_EIR) & ENET_EIR_MII));
1041 
1042 	return;
1043 }
1044 
1045 void
1046 fec_miibus_statchg(struct device *dev)
1047 {
1048 	struct fec_softc *sc = (struct fec_softc *)dev;
1049 	uint32_t ecr, rcr;
1050 
1051 	ecr = HREAD4(sc, ENET_ECR) & ~ENET_ECR_SPEED;
1052 	rcr = HREAD4(sc, ENET_RCR) & ~ENET_RCR_RMII_10T;
1053 	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
1054 	case IFM_1000_T:  /* Gigabit */
1055 		ecr |= ENET_ECR_SPEED;
1056 		break;
1057 	case IFM_100_TX:
1058 		break;
1059 	case IFM_10_T:
1060 		rcr |= ENET_RCR_RMII_10T;
1061 		break;
1062 	}
1063 	HWRITE4(sc, ENET_ECR, ecr);
1064 	HWRITE4(sc, ENET_RCR, rcr);
1065 
1066 	return;
1067 }
1068 
1069 int
1070 fec_ifmedia_upd(struct ifnet *ifp)
1071 {
1072 	struct fec_softc *sc = ifp->if_softc;
1073 	struct mii_data *mii = &sc->sc_mii;
1074 	int err;
1075 	if (mii->mii_instance) {
1076 		struct mii_softc *miisc;
1077 
1078 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1079 			mii_phy_reset(miisc);
1080 	}
1081 	err = mii_mediachg(mii);
1082 	return (err);
1083 }
1084 
1085 void
1086 fec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1087 {
1088 	struct fec_softc *sc = ifp->if_softc;
1089 	struct mii_data *mii = &sc->sc_mii;
1090 
1091 	mii_pollstat(mii);
1092 
1093 	ifmr->ifm_active = mii->mii_media_active;
1094 	ifmr->ifm_status = mii->mii_media_status;
1095 }
1096 
1097 /*
1098  * Manage DMA'able memory.
1099  */
1100 int
1101 fec_dma_malloc(struct fec_softc *sc, bus_size_t size,
1102     struct fec_dma_alloc *dma)
1103 {
1104 	int r;
1105 
1106 	dma->dma_tag = sc->sc_dma_tag;
1107 	r = bus_dmamem_alloc(dma->dma_tag, size, ENET_ALIGNMENT, 0, &dma->dma_seg,
1108 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1109 	if (r != 0) {
1110 		printf("%s: fec_dma_malloc: bus_dmammem_alloc failed; "
1111 			"size %lu, error %d\n", sc->sc_dev.dv_xname,
1112 			(unsigned long)size, r);
1113 		goto fail_0;
1114 	}
1115 
1116 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1117 	    &dma->dma_vaddr, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1118 	if (r != 0) {
1119 		printf("%s: fec_dma_malloc: bus_dmammem_map failed; "
1120 			"size %lu, error %d\n", sc->sc_dev.dv_xname,
1121 			(unsigned long)size, r);
1122 		goto fail_1;
1123 	}
1124 
1125 	r = bus_dmamap_create(dma->dma_tag, size, 1,
1126 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1127 	if (r != 0) {
1128 		printf("%s: fec_dma_malloc: bus_dmamap_create failed; "
1129 			"error %u\n", sc->sc_dev.dv_xname, r);
1130 		goto fail_2;
1131 	}
1132 
1133 	r = bus_dmamap_load(dma->dma_tag, dma->dma_map,
1134 			    dma->dma_vaddr, size, NULL,
1135 			    BUS_DMA_NOWAIT);
1136 	if (r != 0) {
1137 		printf("%s: fec_dma_malloc: bus_dmamap_load failed; "
1138 			"error %u\n", sc->sc_dev.dv_xname, r);
1139 		goto fail_3;
1140 	}
1141 
1142 	dma->dma_size = size;
1143 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
1144 	return (0);
1145 
1146 fail_3:
1147 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1148 fail_2:
1149 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1150 fail_1:
1151 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1152 fail_0:
1153 	dma->dma_map = NULL;
1154 	dma->dma_tag = NULL;
1155 
1156 	return (r);
1157 }
1158 
/*
 * Release a buffer obtained with fec_dma_malloc(), undoing the
 * load/map/alloc steps in reverse.  Safe to call on an allocation
 * that failed (or was already freed): dma_tag is NULL in that case
 * and the function returns immediately.
 */
void
fec_dma_free(struct fec_softc *sc, struct fec_dma_alloc *dma)
{
	if (dma->dma_tag == NULL)
		return;

	if (dma->dma_map != NULL) {
		/* Complete any in-flight DMA before tearing down. */
		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
		    dma->dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	}
	/* Mark the allocation dead so a second free is a no-op. */
	dma->dma_tag = NULL;
}
1176