xref: /netbsd-src/sys/arch/evbppc/virtex/dev/if_temac.c (revision c2f76ff004a2cb67efe5b12d97bd3ef7fe89e18d)
1 /* 	$NetBSD: if_temac.c,v 1.7 2010/04/05 07:19:30 joerg Exp $ */
2 
3 /*
4  * Copyright (c) 2006 Jachym Holecek
5  * All rights reserved.
6  *
7  * Written for DFC Design, s.r.o.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  *
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Driver for Xilinx LocalLink TEMAC as wired on the GSRD platform.
34  *
35  * TODO:
36  * 	- Optimize
37  * 	- Checksum offload
38  * 	- Address filters
39  * 	- Support jumbo frames
40  */
41 
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0, "$NetBSD: if_temac.c,v 1.7 2010/04/05 07:19:30 joerg Exp $");
44 
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/mbuf.h>
49 #include <sys/kernel.h>
50 #include <sys/socket.h>
51 #include <sys/ioctl.h>
52 #include <sys/device.h>
53 
54 #include <uvm/uvm_extern.h>
55 
56 #include <net/if.h>
57 #include <net/if_dl.h>
58 #include <net/if_media.h>
59 #include <net/if_ether.h>
60 
61 #include <net/bpf.h>
62 
63 #include <machine/bus.h>
64 
65 #include <evbppc/virtex/idcr.h>
66 #include <evbppc/virtex/dev/xcvbusvar.h>
67 #include <evbppc/virtex/dev/cdmacreg.h>
68 #include <evbppc/virtex/dev/temacreg.h>
69 #include <evbppc/virtex/dev/temacvar.h>
70 
71 #include <dev/mii/miivar.h>
72 
73 
74 /* This is outside of TEMAC's DCR window, so we have to hardcode it... */
75 #define DCR_ETH_BASE 		0x0030
76 
77 #define	TEMAC_REGDEBUG 		0
78 #define	TEMAC_RXDEBUG 		0
79 #define	TEMAC_TXDEBUG 		0
80 
81 #if TEMAC_RXDEBUG > 0 || TEMAC_TXDEBUG > 0
82 #define	TEMAC_DEBUG 		1
83 #else
84 #define	TEMAC_DEBUG 		0
85 #endif
86 
87 #if TEMAC_REGDEBUG > 0
88 #define	TRACEREG(arg) 		printf arg
89 #else
90 #define	TRACEREG(arg) 		/* nop */
91 #endif
92 
93 /* DMA control chains take up one (16KB) page. */
94 #define TEMAC_NTXDESC 		256
95 #define TEMAC_NRXDESC 		256
96 
97 #define TEMAC_TXQLEN 		64 	/* Software Tx queue length */
98 #define TEMAC_NTXSEG 		16 	/* Maximum Tx segments per packet */
99 
100 #define TEMAC_NRXSEG 		1 	/* Maximum Rx segments per packet */
101 #define TEMAC_RXPERIOD 		1 	/* Interrupt every N descriptors. */
102 #define TEMAC_RXTIMO_HZ 	100 	/* Rx reaper frequency */
103 
104 /* Next Tx descriptor and descriptor's offset WRT sc_cdaddr. */
105 #define TEMAC_TXSINC(n, i) 	(((n) + TEMAC_TXQLEN + (i)) % TEMAC_TXQLEN)
106 #define TEMAC_TXINC(n, i) 	(((n) + TEMAC_NTXDESC + (i)) % TEMAC_NTXDESC)
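/*
 * Note: the "+ TEMAC_TXQLEN" / "+ TEMAC_NTXDESC" bias keeps the modulo result
 * non-negative when a negative step is used, e.g. TEMAC_TXINC(0, -1) yields
 * TEMAC_NTXDESC - 1.
 */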
107 
108 #define TEMAC_TXSNEXT(n) 	TEMAC_TXSINC((n), 1)
109 #define TEMAC_TXNEXT(n) 	TEMAC_TXINC((n), 1)
110 #define TEMAC_TXDOFF(n) 	(offsetof(struct temac_control, cd_txdesc) + \
111 				 (n) * sizeof(struct cdmac_descr))
112 
113 /* Next Rx descriptor and descriptor's offset WRT sc_cdaddr. */
114 #define TEMAC_RXINC(n, i) 	(((n) + TEMAC_NRXDESC + (i)) % TEMAC_NRXDESC)
115 #define TEMAC_RXNEXT(n) 	TEMAC_RXINC((n), 1)
116 #define TEMAC_RXDOFF(n) 	(offsetof(struct temac_control, cd_rxdesc) + \
117 				 (n) * sizeof(struct cdmac_descr))
118 #define TEMAC_ISINTR(i) 	(((i) % TEMAC_RXPERIOD) == 0)
119 #define TEMAC_ISLAST(i) 	((i) == (TEMAC_NRXDESC - 1))
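/*
 * TEMAC_ISINTR: request a CDMAC interrupt on this Rx descriptor (with
 * TEMAC_RXPERIOD == 1 that is every descriptor); TEMAC_ISLAST: this is the
 * final descriptor of the Rx ring, where the DMA engine stops.
 */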
120 
121 
122 struct temac_control {
123 	struct cdmac_descr 	cd_txdesc[TEMAC_NTXDESC];
124 	struct cdmac_descr 	cd_rxdesc[TEMAC_NRXDESC];
125 };
126 
127 struct temac_txsoft {
128 	bus_dmamap_t 		txs_dmap;
129 	struct mbuf 		*txs_mbuf;
130 	int 			txs_last;
131 };
132 
133 struct temac_rxsoft {
134 	bus_dmamap_t 		rxs_dmap;
135 	struct mbuf 		*rxs_mbuf;
136 };
137 
138 struct temac_softc {
139 	struct device 		sc_dev;
140 	struct ethercom 	sc_ec;
141 #define sc_if 			sc_ec.ec_if
142 
143 	/* Peripheral registers */
144 	bus_space_tag_t 	sc_iot;
145 	bus_space_handle_t 	sc_ioh;
146 
147 	/* CDMAC channel registers */
148 	bus_space_tag_t 	sc_dma_rxt;
149 	bus_space_handle_t 	sc_dma_rxh; 	/* Rx channel */
150 	bus_space_handle_t 	sc_dma_rsh; 	/* Rx status */
151 
152 	bus_space_tag_t 	sc_dma_txt;
153 	bus_space_handle_t 	sc_dma_txh; 	/* Tx channel */
154 	bus_space_handle_t 	sc_dma_tsh; 	/* Tx status */
155 
156 	struct temac_txsoft 	sc_txsoft[TEMAC_TXQLEN];
157 	struct temac_rxsoft 	sc_rxsoft[TEMAC_NRXDESC];
158 
159 	struct callout 		sc_rx_timo;
160 	struct callout 		sc_mii_tick;
161 	struct mii_data 	sc_mii;
162 
163 	bus_dmamap_t 		sc_control_dmap;
164 #define sc_cdaddr 		sc_control_dmap->dm_segs[0].ds_addr
165 
166 	struct temac_control 	*sc_control_data;
167 #define sc_rxdescs 		sc_control_data->cd_rxdesc
168 #define sc_txdescs 		sc_control_data->cd_txdesc
169 
170 	int 			sc_txbusy;
171 
172 	int 			sc_txfree;
173 	int 			sc_txcur;
174 	int 			sc_txreap;
175 
176 	int 			sc_rxreap;
177 
178 	int 			sc_txsfree;
179 	int 			sc_txscur;
180 	int 			sc_txsreap;
181 
182 	int 			sc_dead; 	/* Rx/Tx DMA error (fatal) */
183 	int 			sc_rx_drained;
184 
185 	int 			sc_rx_chan;
186 	int 			sc_tx_chan;
187 
188 	void 			*sc_sdhook;
189 	void 			*sc_rx_ih;
190 	void 			*sc_tx_ih;
191 
192 	bus_dma_tag_t 		sc_dmat;
193 };
194 
195 /* Device interface. */
196 static void 	temac_attach(struct device *, struct device *, void *);
197 
198 /* Ifnet interface. */
199 static int 	temac_init(struct ifnet *);
200 static int 	temac_ioctl(struct ifnet *, u_long, void *);
201 static void 	temac_start(struct ifnet *);
202 static void 	temac_stop(struct ifnet *, int);
203 
204 /* Media management. */
205 static int	temac_mii_readreg(struct device *, int, int);
206 static void	temac_mii_statchg(struct device *);
207 static void	temac_mii_tick(void *);
208 static void	temac_mii_writereg(struct device *, int, int, int);
209 
210 /* Indirect hooks. */
211 static void 	temac_shutdown(void *);
212 static void 	temac_rx_intr(void *);
213 static void 	temac_tx_intr(void *);
214 
215 /* Tools. */
216 static inline void 	temac_rxcdsync(struct temac_softc *, int, int, int);
217 static inline void 	temac_txcdsync(struct temac_softc *, int, int, int);
218 static void 		temac_txreap(struct temac_softc *);
219 static void 		temac_rxreap(struct temac_softc *);
220 static int 		temac_rxalloc(struct temac_softc *, int, int);
221 static void 		temac_rxtimo(void *);
222 static void 		temac_rxdrain(struct temac_softc *);
223 static void 		temac_reset(struct temac_softc *);
224 static void 		temac_txkick(struct temac_softc *);
225 
226 /* Register access. */
227 static inline void 	gmi_write_8(uint32_t, uint32_t, uint32_t);
228 static inline void 	gmi_write_4(uint32_t, uint32_t);
229 static inline void 	gmi_read_8(uint32_t, uint32_t *, uint32_t *);
230 static inline uint32_t 	gmi_read_4(uint32_t);
231 static inline void 	hif_wait_stat(uint32_t);
232 
233 #define cdmac_rx_stat(sc) \
234     bus_space_read_4((sc)->sc_dma_rxt, (sc)->sc_dma_rsh, 0 /* XXX hack */)
235 
236 #define cdmac_rx_reset(sc) \
237     bus_space_write_4((sc)->sc_dma_rxt, (sc)->sc_dma_rsh, 0, CDMAC_STAT_RESET)
238 
239 #define cdmac_rx_start(sc, val) \
240     bus_space_write_4((sc)->sc_dma_rxt, (sc)->sc_dma_rxh, CDMAC_CURDESC, (val))
241 
242 #define cdmac_tx_stat(sc) \
243     bus_space_read_4((sc)->sc_dma_txt, (sc)->sc_dma_tsh, 0 /* XXX hack */)
244 
245 #define cdmac_tx_reset(sc) \
246     bus_space_write_4((sc)->sc_dma_txt, (sc)->sc_dma_tsh, 0, CDMAC_STAT_RESET)
247 
248 #define cdmac_tx_start(sc, val) \
249     bus_space_write_4((sc)->sc_dma_txt, (sc)->sc_dma_txh, CDMAC_CURDESC, (val))
250 
251 
252 CFATTACH_DECL(temac, sizeof(struct temac_softc),
253     xcvbus_child_match, temac_attach, NULL, NULL);
254 
255 
256 /*
257  * Private bus utilities.
258  */
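/* Busy-wait until all bits in 'mask' are set in the HIF status register,
 * giving up with a console warning after roughly 100 iterations of delay(5). */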
259 static inline void
260 hif_wait_stat(uint32_t mask)
261 {
262 	int 			i = 0;
263 
264 	while (mask != (mfidcr(IDCR_HIF_STAT) & mask)) {
265 		if (i++ > 100) {
266 			printf("%s: timeout waiting for 0x%08x\n",
267 			    __func__, mask);
268 			break;
269 		}
270 		delay(5);
271 	}
272 
273 	TRACEREG(("%s: stat %#08x loops %d\n", __func__, mask, i));
274 }
275 
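/* GMI (MAC core) registers are accessed indirectly: data and the target
 * address are passed through the host interface (HIF) argument/control DCRs. */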
276 static inline void
277 gmi_write_4(uint32_t addr, uint32_t lo)
278 {
279 	mtidcr(IDCR_HIF_ARG0, lo);
280 	mtidcr(IDCR_HIF_CTRL, (addr & HIF_CTRL_GMIADDR) | HIF_CTRL_WRITE);
281 	hif_wait_stat(HIF_STAT_GMIWR);
282 
283 	TRACEREG(("%s: %#08x <- %#08x\n", __func__, addr, lo));
284 }
285 
286 static inline void
287 gmi_write_8(uint32_t addr, uint32_t lo, uint32_t hi)
288 {
289 	mtidcr(IDCR_HIF_ARG1, hi);
290 	gmi_write_4(addr, lo);
291 }
292 
293 static inline void
294 gmi_read_8(uint32_t addr, uint32_t *lo, uint32_t *hi)
295 {
296 	*lo = gmi_read_4(addr);
297 	*hi = mfidcr(IDCR_HIF_ARG1);
298 }
299 
300 static inline uint32_t
301 gmi_read_4(uint32_t addr)
302 {
303 	uint32_t 		res;
304 
305 	mtidcr(IDCR_HIF_CTRL, addr & HIF_CTRL_GMIADDR);
306 	hif_wait_stat(HIF_STAT_GMIRR);
307 
308 	res = mfidcr(IDCR_HIF_ARG0);
309 	TRACEREG(("%s:  %#08x -> %#08x\n", __func__, addr, res));
310 	return (res);
311 }
312 
313 /*
314  * Generic device.
315  */
316 static void
317 temac_attach(struct device *parent, struct device *self, void *aux)
318 {
319 	struct xcvbus_attach_args *vaa = aux;
320 	struct ll_dmac 		*rx = vaa->vaa_rx_dmac;
321 	struct ll_dmac 		*tx = vaa->vaa_tx_dmac;
322 	struct temac_softc 	*sc = (struct temac_softc *)self;
323 	struct ifnet 		*ifp = &sc->sc_if;
324 	struct mii_data 	*mii = &sc->sc_mii;
325 	uint8_t 		enaddr[ETHER_ADDR_LEN];
326 	bus_dma_segment_t 	seg;
327 	int 			error, nseg, i;
328 
329 	printf(": TEMAC\n"); 	/* XXX will be LL_TEMAC, PLB_TEMAC */
330 
331 	KASSERT(rx);
332 	KASSERT(tx);
333 
334 	sc->sc_dmat = vaa->vaa_dmat;
335 	sc->sc_dead = 0;
336 	sc->sc_rx_drained = 1;
337 	sc->sc_txbusy = 0;
338 	sc->sc_iot = vaa->vaa_iot;
339 	sc->sc_dma_rxt = rx->dmac_iot;
340 	sc->sc_dma_txt = tx->dmac_iot;
341 
342 	/*
343 	 * Map HIF and receive/transmit dmac registers.
344 	 */
345 	if ((error = bus_space_map(vaa->vaa_iot, vaa->vaa_addr, TEMAC_SIZE, 0,
346 	    &sc->sc_ioh)) != 0) {
347 		printf("%s: could not map registers\n", device_xname(self));
348 		goto fail_0;
349 	}
350 
351 	if ((error = bus_space_map(sc->sc_dma_rxt, rx->dmac_ctrl_addr,
352 	    CDMAC_CTRL_SIZE, 0, &sc->sc_dma_rxh)) != 0) {
353 		printf("%s: could not map Rx control registers\n",
354 		    device_xname(self));
355 		goto fail_0;
356 	}
357 	if ((error = bus_space_map(sc->sc_dma_rxt, rx->dmac_stat_addr,
358 	    CDMAC_STAT_SIZE, 0, &sc->sc_dma_rsh)) != 0) {
359 		printf("%s: could not map Rx status register\n",
360 		    device_xname(self));
361 		goto fail_0;
362 	}
363 
364 	if ((error = bus_space_map(sc->sc_dma_txt, tx->dmac_ctrl_addr,
365 	    CDMAC_CTRL_SIZE, 0, &sc->sc_dma_txh)) != 0) {
366 		printf("%s: could not map Tx control registers\n",
367 		    device_xname(self));
368 		goto fail_0;
369 	}
370 	if ((error = bus_space_map(sc->sc_dma_txt, tx->dmac_stat_addr,
371 	    CDMAC_STAT_SIZE, 0, &sc->sc_dma_tsh)) != 0) {
372 		printf("%s: could not map Tx status register\n",
373 		    device_xname(self));
374 		goto fail_0;
375 	}
376 
377 	/*
378 	 * Allocate and initialize DMA control chains.
379 	 */
380 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
381 	    sizeof(struct temac_control), 8, 0, &seg, 1, &nseg, 0)) != 0) {
382 	    	printf("%s: could not allocate control data\n",
383 	    	    sc->sc_dev.dv_xname);
384 		goto fail_0;
385 	}
386 
387 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
388 	    sizeof(struct temac_control),
389 	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
390 	    	printf("%s: could not map control data\n",
391 	    	    sc->sc_dev.dv_xname);
392 		goto fail_1;
393 	}
394 
395 	if ((error = bus_dmamap_create(sc->sc_dmat,
396 	    sizeof(struct temac_control), 1,
397 	    sizeof(struct temac_control), 0, 0, &sc->sc_control_dmap)) != 0) {
398 	    	printf("%s: could not create control data DMA map\n",
399 	    	    sc->sc_dev.dv_xname);
400 		goto fail_2;
401 	}
402 
403 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_control_dmap,
404 	    sc->sc_control_data, sizeof(struct temac_control), NULL, 0)) != 0) {
405 	    	printf("%s: could not load control data DMA map\n",
406 	    	    sc->sc_dev.dv_xname);
407 		goto fail_3;
408 	}
409 
410 	/*
411 	 * Link descriptor chains.
412 	 */
413 	memset(sc->sc_control_data, 0, sizeof(struct temac_control));
414 
415 	for (i = 0; i < TEMAC_NTXDESC; i++) {
416 		sc->sc_txdescs[i].desc_next = sc->sc_cdaddr +
417 		    TEMAC_TXDOFF(TEMAC_TXNEXT(i));
418 		sc->sc_txdescs[i].desc_stat = CDMAC_STAT_DONE;
419 	}
420 	for (i = 0; i < TEMAC_NRXDESC; i++) {
421 		sc->sc_rxdescs[i].desc_next = sc->sc_cdaddr +
422 		    TEMAC_RXDOFF(TEMAC_RXNEXT(i));
423 		sc->sc_rxdescs[i].desc_stat = CDMAC_STAT_DONE;
424 	}
425 
426 	bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap, 0,
427 	    sizeof(struct temac_control),
428 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
429 
430 	/*
431 	 * Initialize software state for transmit/receive jobs.
432 	 */
433 	for (i = 0; i < TEMAC_TXQLEN; i++) {
434 		if ((error = bus_dmamap_create(sc->sc_dmat,
435 		    ETHER_MAX_LEN_JUMBO, TEMAC_NTXSEG, ETHER_MAX_LEN_JUMBO,
436 		    0, 0, &sc->sc_txsoft[i].txs_dmap)) != 0) {
437 		    	printf("%s: could not create Tx DMA map %d\n",
438 		    	    sc->sc_dev.dv_xname, i);
439 			goto fail_4;
440 		}
441 		sc->sc_txsoft[i].txs_mbuf = NULL;
442 		sc->sc_txsoft[i].txs_last = 0;
443 	}
444 
445 	for (i = 0; i < TEMAC_NRXDESC; i++) {
446 		if ((error = bus_dmamap_create(sc->sc_dmat,
447 		    MCLBYTES, TEMAC_NRXSEG, MCLBYTES, 0, 0,
448 		    &sc->sc_rxsoft[i].rxs_dmap)) != 0) {
449 		    	printf("%s: could not create Rx DMA map %d\n",
450 		    	    sc->sc_dev.dv_xname, i);
451 			goto fail_5;
452 		}
453 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
454 	}
455 
456 	/*
457 	 * Set up transfer interrupt handlers.
458 	 */
459 	error = ENOMEM;
460 
461 	sc->sc_rx_ih = ll_dmac_intr_establish(rx->dmac_chan,
462 	    temac_rx_intr, sc);
463 	if (sc->sc_rx_ih == NULL) {
464 		printf("%s: could not establish Rx interrupt\n",
465 		    device_xname(self));
466 		goto fail_5;
467 	}
468 
469 	sc->sc_tx_ih = ll_dmac_intr_establish(tx->dmac_chan,
470 	    temac_tx_intr, sc);
471 	if (sc->sc_tx_ih == NULL) {
472 		printf("%s: could not establish Tx interrupt\n",
473 		    device_xname(self));
474 		goto fail_6;
475 	}
476 
477 	/* XXXFreza: faked, should read unicast address filter. */
478 	enaddr[0] = 0x00;
479 	enaddr[1] = 0x11;
480 	enaddr[2] = 0x17;
481 	enaddr[3] = 0xff;
482 	enaddr[4] = 0xff;
483 	enaddr[5] = 0x01;
484 
485 	/*
486 	 * Initialize the TEMAC.
487 	 */
488 	temac_reset(sc);
489 
490 	/* Configure MDIO link. */
491 	gmi_write_4(TEMAC_GMI_MGMTCF, GMI_MGMT_CLKDIV_100MHz | GMI_MGMT_MDIO);
492 
493 	/* Initialize PHY. */
494 	mii->mii_ifp = ifp;
495 	mii->mii_readreg = temac_mii_readreg;
496 	mii->mii_writereg = temac_mii_writereg;
497 	mii->mii_statchg = temac_mii_statchg;
498 	sc->sc_ec.ec_mii = mii;
499 	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
500 
501 	mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
502 	    MII_OFFSET_ANY, 0);
503 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
504 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
505 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
506 	} else {
507 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
508 	}
509 
510 	/* Hold PHY in reset. */
511 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET, TEMAC_RESET_PHY);
512 
513 	/* Reset EMAC. */
514 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET,
515 	    TEMAC_RESET_EMAC);
516 	delay(10000);
517 
518 	/* Reset the peripheral; this wakes up the PHY and EMAC. */
519 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET,
520 	    TEMAC_RESET_PERIPH);
521 	delay(40000);
522 
523 	/* (Re-)Configure MDIO link. */
524 	gmi_write_4(TEMAC_GMI_MGMTCF, GMI_MGMT_CLKDIV_100MHz | GMI_MGMT_MDIO);
525 
526 	/*
527 	 * Hook up with network stack.
528 	 */
529 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
530 	ifp->if_softc = sc;
531 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
532 	ifp->if_ioctl = temac_ioctl;
533 	ifp->if_start = temac_start;
534 	ifp->if_init = temac_init;
535 	ifp->if_stop = temac_stop;
536 	ifp->if_watchdog = NULL;
537 	IFQ_SET_READY(&ifp->if_snd);
538 	IFQ_SET_MAXLEN(&ifp->if_snd, TEMAC_TXQLEN);
539 
540 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
541 
542 	if_attach(ifp);
543 	ether_ifattach(ifp, enaddr);
544 
545 	sc->sc_sdhook = shutdownhook_establish(temac_shutdown, sc);
546 	if (sc->sc_sdhook == NULL)
547 		printf("%s: WARNING: unable to establish shutdown hook\n",
548 		    device_xname(self));
549 
550 	callout_setfunc(&sc->sc_mii_tick, temac_mii_tick, sc);
551 	callout_setfunc(&sc->sc_rx_timo, temac_rxtimo, sc);
552 
553 	return;
554 
555  fail_6:
556 	ll_dmac_intr_disestablish(rx->dmac_chan, sc->sc_rx_ih);
557 	i = TEMAC_NRXDESC;
558  fail_5:
559  	for (--i; i >= 0; i--)
560  		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxsoft[i].rxs_dmap);
561 	i = TEMAC_TXQLEN;
562  fail_4:
563  	for (--i; i >= 0; i--)
564  		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txsoft[i].txs_dmap);
565  fail_3:
566 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_control_dmap);
567  fail_2:
568 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
569 	    sizeof(struct temac_control));
570  fail_1:
571 	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
572  fail_0:
573  	printf("%s: error = %d\n", device_xname(self), error);
574 }
575 
576 /*
577  * Network device.
578  */
579 static int
580 temac_init(struct ifnet *ifp)
581 {
582 	struct temac_softc 	*sc = (struct temac_softc *)ifp->if_softc;
583 	uint32_t 		rcr, tcr;
584 	int 			i, error;
585 
586 	/* Reset DMA channels. */
587 	cdmac_tx_reset(sc);
588 	cdmac_rx_reset(sc);
589 
590 	/* Set current media. */
591 	if ((error = ether_mediachange(ifp)) != 0)
592 		return error;
593 
594 	callout_schedule(&sc->sc_mii_tick, hz);
595 
596 	/* Enable EMAC engine. */
597 	rcr = (gmi_read_4(TEMAC_GMI_RXCF1) | GMI_RX_ENABLE) &
598 	    ~(GMI_RX_JUMBO | GMI_RX_FCS);
599 	gmi_write_4(TEMAC_GMI_RXCF1, rcr);
600 
601 	tcr = (gmi_read_4(TEMAC_GMI_TXCF) | GMI_TX_ENABLE) &
602 	    ~(GMI_TX_JUMBO | GMI_TX_FCS);
603 	gmi_write_4(TEMAC_GMI_TXCF, tcr);
604 
605 	/* XXXFreza: Force promiscuous mode, for now. */
606 	gmi_write_4(TEMAC_GMI_AFM, GMI_AFM_PROMISC);
607 	ifp->if_flags |= IFF_PROMISC;
608 
609 	/* Rx/Tx queues are drained -- either from attach() or stop(). */
610 	sc->sc_txsfree = TEMAC_TXQLEN;
611 	sc->sc_txsreap = 0;
612 	sc->sc_txscur = 0;
613 
614 	sc->sc_txfree = TEMAC_NTXDESC;
615 	sc->sc_txreap = 0;
616 	sc->sc_txcur = 0;
617 
618 	sc->sc_rxreap = 0;
619 
620 	/* Allocate and map receive buffers. */
621 	if (sc->sc_rx_drained) {
622 		for (i = 0; i < TEMAC_NRXDESC; i++) {
623 			if ((error = temac_rxalloc(sc, i, 1)) != 0) {
624 				printf("%s: failed to allocate Rx "
625 				    "descriptor %d\n",
626 				    sc->sc_dev.dv_xname, i);
627 
628 				temac_rxdrain(sc);
629 				return (error);
630 			}
631 		}
632 		sc->sc_rx_drained = 0;
633 
634 		temac_rxcdsync(sc, 0, TEMAC_NRXDESC,
635 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
636 		cdmac_rx_start(sc, sc->sc_cdaddr + TEMAC_RXDOFF(0));
637 	}
638 
639 	ifp->if_flags |= IFF_RUNNING;
640 	ifp->if_flags &= ~IFF_OACTIVE;
641 
642 	return (0);
643 }
644 
645 static int
646 temac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
647 {
648 	struct temac_softc 	*sc = (struct temac_softc *)ifp->if_softc;
649 	int 			s, ret;
650 
651 	s = splnet();
652 	if (sc->sc_dead)
653 		ret = EIO;
654 	else
655 		ret = ether_ioctl(ifp, cmd, data);
656 	splx(s);
657 	return (ret);
658 }
659 
660 static void
661 temac_start(struct ifnet *ifp)
662 {
663 	struct temac_softc 	*sc = (struct temac_softc *)ifp->if_softc;
664 	struct temac_txsoft 	*txs;
665 	struct mbuf 		*m;
666 	bus_dmamap_t 		dmap;
667 	int 			error, head, nsegs, i;
668 
669 	nsegs = 0;
670 	head = sc->sc_txcur;
671 	txs = NULL; 		/* gcc */
672 
673 	if (sc->sc_dead)
674 		return;
675 
676 	KASSERT(sc->sc_txfree >= 0);
677 	KASSERT(sc->sc_txsfree >= 0);
678 
679 	/*
680 	 * Push mbufs into descriptor chain until we drain the interface
681 	 * queue or run out of descriptors. We'll mark the first segment
682 	 * as "done" in hope that we might put CDMAC interrupt above IPL_NET
683 	 * and have it start jobs & mark packets for GC preemptively for
684 	 * us -- creativity due to limitations in the CDMAC transfer engine
685 	 * (it really consumes lists, not circular queues, AFAICS).
686 	 *
687 	 * We schedule one interrupt per Tx batch.
688 	 */
689 	while (1) {
690 		IFQ_POLL(&ifp->if_snd, m);
691 		if (m == NULL)
692 			break;
693 
694 		if (sc->sc_txsfree == 0) {
695 			ifp->if_flags |= IFF_OACTIVE;
696 			break;
697 		}
698 
699 		txs = &sc->sc_txsoft[sc->sc_txscur];
700 		dmap = txs->txs_dmap;
701 
702 		if (txs->txs_mbuf != NULL)
703 			printf("FOO\n");
704 		if (txs->txs_last)
705 			printf("BAR\n");
706 
707 		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m,
708 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT)) != 0) {
709 		    	if (error == EFBIG) {
710 		    		printf("%s: Tx consumes too many segments, "
711 		    		    "dropped\n", sc->sc_dev.dv_xname);
712 				IFQ_DEQUEUE(&ifp->if_snd, m);
713 				m_freem(m);
714 				continue;
715 		    	} else {
716 		    		printf("%s: Tx stall due to resource "
717 		    		    "shortage\n", sc->sc_dev.dv_xname);
718 		    		break;
719 			}
720 		}
721 
722 		/*
723 		 * If we're short on DMA descriptors, notify upper layers
724 		 * and leave this packet for later.
725 		 */
726 		if (dmap->dm_nsegs > sc->sc_txfree) {
727 			bus_dmamap_unload(sc->sc_dmat, dmap);
728 			ifp->if_flags |= IFF_OACTIVE;
729 			break;
730 		}
731 
732 		IFQ_DEQUEUE(&ifp->if_snd, m);
733 
734 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
735 		    BUS_DMASYNC_PREWRITE);
736 		txs->txs_mbuf = m;
737 
738 		/*
739 		 * Map the packet into descriptor chain. XXX We'll want
740 		 * to fill checksum offload commands here.
741 		 *
742 		 * We would be in a race if we weren't blocking CDMAC intr
743 		 * at this point -- we need to be locked against txreap()
744 		 * because of dmasync ops.
745 		 */
746 
747 		temac_txcdsync(sc, sc->sc_txcur, dmap->dm_nsegs,
748 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
749 
750 		for (i = 0; i < dmap->dm_nsegs; i++) {
751 			sc->sc_txdescs[sc->sc_txcur].desc_addr =
752 			    dmap->dm_segs[i].ds_addr;
753 			sc->sc_txdescs[sc->sc_txcur].desc_size =
754 			    dmap->dm_segs[i].ds_len;
755 			sc->sc_txdescs[sc->sc_txcur].desc_stat =
756 			    (i == 0 			? CDMAC_STAT_SOP : 0) |
757 			    (i == (dmap->dm_nsegs - 1) 	? CDMAC_STAT_EOP : 0);
758 
759 			sc->sc_txcur = TEMAC_TXNEXT(sc->sc_txcur);
760 		}
761 
762 		sc->sc_txfree -= dmap->dm_nsegs;
763 		nsegs += dmap->dm_nsegs;
764 
765 		sc->sc_txscur = TEMAC_TXSNEXT(sc->sc_txscur);
766 		sc->sc_txsfree--;
767 	}
768 
769 	/* Get data running if we queued any. */
770 	if (nsegs > 0) {
771 		int 		tail = TEMAC_TXINC(sc->sc_txcur, -1);
772 
773 		/* Mark the last packet in this job. */
774 		txs->txs_last = 1;
775 
776 		/* Mark the last descriptor in this job. */
777 		sc->sc_txdescs[tail].desc_stat |= CDMAC_STAT_STOP |
778 		    CDMAC_STAT_INTR;
779 		temac_txcdsync(sc, head, nsegs,
780 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
781 
782 		temac_txkick(sc);
783 #if TEMAC_TXDEBUG > 0
784 		printf("%s: start:  txcur  %03d -> %03d, nseg %03d\n",
785 		    sc->sc_dev.dv_xname, head, sc->sc_txcur, nsegs);
786 #endif
787 	}
788 }
789 
790 static void
791 temac_stop(struct ifnet *ifp, int disable)
792 {
793 	struct temac_softc 	*sc = (struct temac_softc *)ifp->if_softc;
794 	struct temac_txsoft 	*txs;
795 	int 			i;
796 
797 #if TEMAC_DEBUG > 0
798 	printf("%s: stop\n", device_xname(&sc->sc_dev));
799 #endif
800 
801 	/* Down the MII. */
802 	callout_stop(&sc->sc_mii_tick);
803 	mii_down(&sc->sc_mii);
804 
805 	/* Stop the engine. */
806 	temac_reset(sc);
807 
808 	/* Drain buffers queues (unconditionally). */
809 	temac_rxdrain(sc);
810 
811 	for (i = 0; i < TEMAC_TXQLEN; i++) {
812 		txs = &sc->sc_txsoft[i];
813 
814 		if (txs->txs_mbuf != NULL) {
815 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmap);
816 			m_freem(txs->txs_mbuf);
817 			txs->txs_mbuf = NULL;
818 			txs->txs_last = 0;
819 		}
820 	}
821 	sc->sc_txbusy = 0;
822 
823 	/* Acknowledge we're down. */
824 	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
825 }
826 
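/* MII (PHY) registers are reached through the same host interface: the
 * (phy, reg) pair goes into HIF_ARG0 and the transfer is started via HIF_CTRL. */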
827 static int
828 temac_mii_readreg(struct device *self, int phy, int reg)
829 {
830 	mtidcr(IDCR_HIF_ARG0, (phy << 5) | reg);
831 	mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_ADDR);
832 	hif_wait_stat(HIF_STAT_MIIRR);
833 
834 	return (int)mfidcr(IDCR_HIF_ARG0);
835 }
836 
837 static void
838 temac_mii_writereg(struct device *self, int phy, int reg, int val)
839 {
840 	mtidcr(IDCR_HIF_ARG0, val);
841 	mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_WRVAL | HIF_CTRL_WRITE);
842 	mtidcr(IDCR_HIF_ARG0, (phy << 5) | reg);
843 	mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_ADDR | HIF_CTRL_WRITE);
844 	hif_wait_stat(HIF_STAT_MIIWR);
845 }
846 
847 static void
848 temac_mii_statchg(struct device *self)
849 {
850 	struct temac_softc 	*sc = (struct temac_softc *)self;
851 	uint32_t 		rcf, tcf, mmc;
852 
853 	/* Full/half duplex link. */
854 	rcf = gmi_read_4(TEMAC_GMI_RXCF1);
855 	tcf = gmi_read_4(TEMAC_GMI_TXCF);
856 
857 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
858 		gmi_write_4(TEMAC_GMI_RXCF1, rcf & ~GMI_RX_HDX);
859 		gmi_write_4(TEMAC_GMI_TXCF, tcf & ~GMI_TX_HDX);
860 	} else {
861 		gmi_write_4(TEMAC_GMI_RXCF1, rcf | GMI_RX_HDX);
862 		gmi_write_4(TEMAC_GMI_TXCF, tcf | GMI_TX_HDX);
863 	}
864 
865 	/* Link speed. */
866 	mmc = gmi_read_4(TEMAC_GMI_MMC) & ~GMI_MMC_SPEED_MASK;
867 
868 	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
869 	case IFM_10_T:
870 		/*
871 		 * XXXFreza: the GMAC is not happy with 10Mbit ethernet,
872 		 * although the documentation claims it's supported. Maybe
873 		 * it's just my equipment...
874 		 */
875 		mmc |= GMI_MMC_SPEED_10;
876 		break;
877 	case IFM_100_TX:
878 		mmc |= GMI_MMC_SPEED_100;
879 		break;
880 	case IFM_1000_T:
881 		mmc |= GMI_MMC_SPEED_1000;
882 		break;
883 	}
884 
885 	gmi_write_4(TEMAC_GMI_MMC, mmc);
886 }
887 
888 static void
889 temac_mii_tick(void *arg)
890 {
891 	struct temac_softc 	*sc = (struct temac_softc *)arg;
892 	int 			s;
893 
894 	if (!device_is_active(&sc->sc_dev))
895 		return;
896 
897 	s = splnet();
898 	mii_tick(&sc->sc_mii);
899 	splx(s);
900 
901 	callout_schedule(&sc->sc_mii_tick, hz);
902 }
903 
904 /*
905  * External hooks.
906  */
907 static void
908 temac_shutdown(void *arg)
909 {
910 	struct temac_softc 	*sc = (struct temac_softc *)arg;
911 
912 	temac_reset(sc);
913 }
914 
915 static void
916 temac_tx_intr(void *arg)
917 {
918 	struct temac_softc 	*sc = (struct temac_softc *)arg;
919 	uint32_t 		stat;
920 
921 	/* XXX: We may need to splnet() here if cdmac(4) changes. */
922 
923 	if ((stat = cdmac_tx_stat(sc)) & CDMAC_STAT_ERROR) {
924 		printf("%s: transmit DMA is toast (%#08x), halted!\n",
925 		    sc->sc_dev.dv_xname, stat);
926 
927 		/* XXXFreza: how to signal this upstream? */
928 		temac_stop(&sc->sc_if, 1);
929 		sc->sc_dead = 1;
930 	}
931 
932 #if TEMAC_DEBUG > 0
933 	printf("%s: tx intr 0x%08x\n", device_xname(&sc->sc_dev), stat);
934 #endif
935 	temac_txreap(sc);
936 }
937 
938 static void
939 temac_rx_intr(void *arg)
940 {
941 	struct temac_softc 	*sc = (struct temac_softc *)arg;
942 	uint32_t 		stat;
943 
944 	/* XXX: We may need to splnet() here if cdmac(4) changes. */
945 
946 	if ((stat = cdmac_rx_stat(sc)) & CDMAC_STAT_ERROR) {
947 		printf("%s: receive DMA is toast (%#08x), halted!\n",
948 		    sc->sc_dev.dv_xname, stat);
949 
950 		/* XXXFreza: how to signal this upstream? */
951 		temac_stop(&sc->sc_if, 1);
952 		sc->sc_dead = 1;
953 	}
954 
955 #if TEMAC_DEBUG > 0
956 	printf("%s: rx intr 0x%08x\n", device_xname(&sc->sc_dev), stat);
957 #endif
958 	temac_rxreap(sc);
959 }
960 
961 /*
962  * Utils.
963  */
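/* Sync a range of Tx control descriptors, issuing two bus_dmamap_sync()
 * calls when the range wraps around the end of the descriptor ring. */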
964 static inline void
965 temac_txcdsync(struct temac_softc *sc, int first, int cnt, int flag)
966 {
967 	if ((first + cnt) > TEMAC_NTXDESC) {
968 		bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
969 		    TEMAC_TXDOFF(first),
970 		    sizeof(struct cdmac_descr) * (TEMAC_NTXDESC - first),
971 		    flag);
972 		cnt = (first + cnt) % TEMAC_NTXDESC;
973 		first = 0;
974 	}
975 
976 	bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
977 	    TEMAC_TXDOFF(first),
978 	    sizeof(struct cdmac_descr) * cnt,
979 	    flag);
980 }
981 
982 static inline void
983 temac_rxcdsync(struct temac_softc *sc, int first, int cnt, int flag)
984 {
985 	if ((first + cnt) > TEMAC_NRXDESC) {
986 		bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
987 		    TEMAC_RXDOFF(first),
988 		    sizeof(struct cdmac_descr) * (TEMAC_NRXDESC - first),
989 		    flag);
990 		cnt = (first + cnt) % TEMAC_NRXDESC;
991 		first = 0;
992 	}
993 
994 	bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
995 	    TEMAC_RXDOFF(first),
996 	    sizeof(struct cdmac_descr) * cnt,
997 	    flag);
998 }
999 
1000 static void
1001 temac_txreap(struct temac_softc *sc)
1002 {
1003 	struct temac_txsoft 	*txs;
1004 	bus_dmamap_t 		dmap;
1005 	int 			sent = 0;
1006 
1007 	/*
1008 	 * Transmit interrupts happen on the last descriptor of Tx jobs.
1009 	 * Hence, every time we're called (and we assume txintr is our
1010 	 * only caller!), we reap packets up to and including the one
1011 	 * marked as last-in-batch.
1012 	 *
1013 	 * XXX we rely on that we make EXACTLY one batch per intr, no more
1014 	 */
1015 	while (sc->sc_txsfree != TEMAC_TXQLEN) {
1016 		txs = &sc->sc_txsoft[sc->sc_txsreap];
1017 		dmap = txs->txs_dmap;
1018 
1019 		sc->sc_txreap = TEMAC_TXINC(sc->sc_txreap, dmap->dm_nsegs);
1020 		sc->sc_txfree += dmap->dm_nsegs;
1021 
1022 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmap);
1023 		m_freem(txs->txs_mbuf);
1024 		txs->txs_mbuf = NULL;
1025 
1026 		sc->sc_if.if_opackets++;
1027 		sent = 1;
1028 
1029 		sc->sc_txsreap = TEMAC_TXSNEXT(sc->sc_txsreap);
1030 		sc->sc_txsfree++;
1031 
1032 		if (txs->txs_last) {
1033 			txs->txs_last = 0;
1034 			sc->sc_txbusy = 0; 	/* channel stopped now */
1035 
1036 			temac_txkick(sc);
1037 			break;
1038 		}
1039 	}
1040 
1041 	if (sent && (sc->sc_if.if_flags & IFF_OACTIVE))
1042 		sc->sc_if.if_flags &= ~IFF_OACTIVE;
1043 }
1044 
1045 static int
1046 temac_rxalloc(struct temac_softc *sc, int which, int verbose)
1047 {
1048 	struct temac_rxsoft 	*rxs;
1049 	struct mbuf 		*m;
1050 	uint32_t 		stat;
1051 	int 			error;
1052 
1053 	rxs = &sc->sc_rxsoft[which];
1054 
1055 	/* The mbuf itself is not our problem, just clear DMA related stuff. */
1056 	if (rxs->rxs_mbuf != NULL) {
1057 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmap);
1058 		rxs->rxs_mbuf = NULL;
1059 	}
1060 
1061 	/*
1062 	 * We would like to store mbuf and dmap in application-specific
1063 	 * fields of the descriptor, but that doesn't work for Rx. Shame
1064 	 * on Xilinx for this (and for the useless timer architecture).
1065 	 *
1066 	 * Hence each descriptor needs its own soft state. We may want
1067 	 * to merge multiple rxs's into a monster mbuf when we support
1068 	 * jumbo frames though. Also, we use single set of indexing
1069 	 * variables for both sc_rxdescs[] and sc_rxsoft[].
1070 	 */
1071 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1072 	if (m == NULL) {
1073 		if (verbose)
1074 			printf("%s: out of Rx header mbufs\n",
1075 			    sc->sc_dev.dv_xname);
1076 		return (ENOBUFS);
1077 	}
1078 	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
1079 
1080 	MCLGET(m, M_DONTWAIT);
1081 	if ((m->m_flags & M_EXT) == 0) {
1082 		if (verbose)
1083 			printf("%s: out of Rx cluster mbufs\n",
1084 			    sc->sc_dev.dv_xname);
1085 		m_freem(m);
1086 		return (ENOBUFS);
1087 	}
1088 
1089 	rxs->rxs_mbuf = m;
1090 	m->m_pkthdr.len = m->m_len = MCLBYTES;
1091 
1092 	/* Make sure the payload after the Ethernet header is 4-byte aligned. */
1093 	m_adj(m, 2);
1094 
1095 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmap, m,
1096 	    BUS_DMA_NOWAIT);
1097 	if (error) {
1098 		if (verbose)
1099 			printf("%s: could not map Rx descriptor %d, "
1100 			    "error = %d\n", sc->sc_dev.dv_xname, which, error);
1101 
1102 		rxs->rxs_mbuf = NULL;
1103 		m_freem(m);
1104 
1105 		return (error);
1106 	}
1107 
1108 	stat =
1109 	    (TEMAC_ISINTR(which) ? CDMAC_STAT_INTR : 0) |
1110 	    (TEMAC_ISLAST(which) ? CDMAC_STAT_STOP : 0);
1111 
1112 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmap, 0,
1113 	    rxs->rxs_dmap->dm_mapsize, BUS_DMASYNC_PREREAD);
1114 
1115 	/* Descriptor post-sync, if needed, left to the caller. */
1116 
1117 	sc->sc_rxdescs[which].desc_addr = rxs->rxs_dmap->dm_segs[0].ds_addr;
1118 	sc->sc_rxdescs[which].desc_size  = rxs->rxs_dmap->dm_segs[0].ds_len;
1119 	sc->sc_rxdescs[which].desc_stat = stat;
1120 
1121 	/* Descriptor pre-sync, if needed, left to the caller. */
1122 
1123 	return (0);
1124 }
1125 
1126 static void
1127 temac_rxreap(struct temac_softc *sc)
1128 {
1129 	struct ifnet 		*ifp = &sc->sc_if;
1130 	uint32_t 		stat, rxstat, rxsize;
1131 	struct mbuf 		*m;
1132 	int 			nseg, head, tail;
1133 
1134 	head = sc->sc_rxreap;
1135 	tail = 0; 		/* gcc */
1136 	nseg = 0;
1137 
1138 	/*
1139 	 * Collect finished entries on the Rx list, kick DMA if we hit
1140 	 * the end. DMA will always stop on the last descriptor in chain,
1141 	 * the end. DMA will always stop on the last descriptor in the chain,
1142 	 */
1143 	while (1) {
1144 		/* Maybe we previously failed to refresh this one? */
1145 		if (sc->sc_rxsoft[sc->sc_rxreap].rxs_mbuf == NULL) {
1146 			if (temac_rxalloc(sc, sc->sc_rxreap, 0) != 0)
1147 				break;
1148 
1149 			sc->sc_rxreap = TEMAC_RXNEXT(sc->sc_rxreap);
1150 			continue;
1151 		}
1152 		temac_rxcdsync(sc, sc->sc_rxreap, 1,
1153 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1154 
1155 		stat = sc->sc_rxdescs[sc->sc_rxreap].desc_stat;
1156 		m = NULL;
1157 
1158 		if ((stat & CDMAC_STAT_DONE) == 0)
1159 			break;
1160 
1161 		/* Count any descriptor we've collected, regardless of status. */
1162 		nseg++;
1163 
1164 		/* XXXFreza: This won't work for jumbo frames. */
1165 
1166 		if ((stat & (CDMAC_STAT_EOP | CDMAC_STAT_SOP)) !=
1167 		    (CDMAC_STAT_EOP | CDMAC_STAT_SOP)) {
1168 		    	printf("%s: Rx packet doesn't fit in "
1169 		    	    "one descriptor, stat = %#08x\n",
1170 		    	    sc->sc_dev.dv_xname, stat);
1171 			goto badframe;
1172 		}
1173 
1174 		/* Dissect TEMAC footer if this is end of packet. */
1175 		rxstat = sc->sc_rxdescs[sc->sc_rxreap].desc_rxstat;
1176 		rxsize = sc->sc_rxdescs[sc->sc_rxreap].desc_rxsize &
1177 		    RXSIZE_MASK;
1178 
1179 		if ((rxstat & RXSTAT_GOOD) == 0 ||
1180 		    (rxstat & RXSTAT_SICK) != 0) {
1181 		    	printf("%s: corrupt Rx packet, rxstat = %#08x\n",
1182 		    	    sc->sc_dev.dv_xname, rxstat);
1183 			goto badframe;
1184 		}
1185 
1186 		/* We are now bound to succeed. */
1187 		bus_dmamap_sync(sc->sc_dmat,
1188 		    sc->sc_rxsoft[sc->sc_rxreap].rxs_dmap, 0,
1189 		    sc->sc_rxsoft[sc->sc_rxreap].rxs_dmap->dm_mapsize,
1190 		    BUS_DMASYNC_POSTREAD);
1191 
1192 		m = sc->sc_rxsoft[sc->sc_rxreap].rxs_mbuf;
1193 		m->m_pkthdr.rcvif = ifp;
1194 		m->m_pkthdr.len = m->m_len = rxsize;
1195 
1196  badframe:
1197  		/* Get ready for more work. */
1198 		tail = sc->sc_rxreap;
1199 		sc->sc_rxreap = TEMAC_RXNEXT(sc->sc_rxreap);
1200 
1201  		/* On failures we reuse the descriptor and go ahead. */
1202  		if (m == NULL) {
1203 			sc->sc_rxdescs[tail].desc_stat =
1204 			    (TEMAC_ISINTR(tail) ? CDMAC_STAT_INTR : 0) |
1205 			    (TEMAC_ISLAST(tail) ? CDMAC_STAT_STOP : 0);
1206 
1207 			ifp->if_ierrors++;
1208 			continue;
1209  		}
1210 
1211 		bpf_mtap(ifp, m);
1212 
1213 		ifp->if_ipackets++;
1214 		(ifp->if_input)(ifp, m);
1215 
1216 		/* Refresh descriptor, bail out if we're out of buffers. */
1217 		if (temac_rxalloc(sc, tail, 1) != 0) {
1218  			sc->sc_rxreap = TEMAC_RXINC(sc->sc_rxreap, -1);
1219  			printf("%s: Rx give up for now\n", sc->sc_dev.dv_xname);
1220 			break;
1221 		}
1222 	}
1223 
1224 	/* We may now have a contiguous ready-to-go chunk of descriptors. */
1225 	if (nseg > 0) {
1226 #if TEMAC_RXDEBUG > 0
1227 		printf("%s: rxreap: rxreap %03d -> %03d, nseg %03d\n",
1228 		    sc->sc_dev.dv_xname, head, sc->sc_rxreap, nseg);
1229 #endif
1230 		temac_rxcdsync(sc, head, nseg,
1231 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1232 
1233 		if (TEMAC_ISLAST(tail))
1234 			cdmac_rx_start(sc, sc->sc_cdaddr + TEMAC_RXDOFF(0));
1235 	}
1236 
1237 	/* Ensure maximum Rx latency is kept under control. */
1238 	callout_schedule(&sc->sc_rx_timo, hz / TEMAC_RXTIMO_HZ);
1239 }
1240 
1241 static void
1242 temac_rxtimo(void *arg)
1243 {
1244 	struct temac_softc 	*sc = (struct temac_softc *)arg;
1245 	int 			s;
1246 
1247 	/* We run TEMAC_RXTIMO_HZ times/sec to ensure Rx doesn't stall. */
1248 	s = splnet();
1249 	temac_rxreap(sc);
1250 	splx(s);
1251 }
1252 
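/* Quiesce the hardware: reset both CDMAC channels and disable the EMAC
 * receiver and transmitter.  Called from attach, stop and the shutdown hook. */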
1253 static void
1254 temac_reset(struct temac_softc *sc)
1255 {
1256 	uint32_t 		rcr, tcr;
1257 
1258 	/* Kill CDMAC channels. */
1259 	cdmac_tx_reset(sc);
1260 	cdmac_rx_reset(sc);
1261 
1262 	/* Disable receiver. */
1263 	rcr = gmi_read_4(TEMAC_GMI_RXCF1) & ~GMI_RX_ENABLE;
1264 	gmi_write_4(TEMAC_GMI_RXCF1, rcr);
1265 
1266 	/* Disable transmitter. */
1267 	tcr = gmi_read_4(TEMAC_GMI_TXCF) & ~GMI_TX_ENABLE;
1268 	gmi_write_4(TEMAC_GMI_TXCF, tcr);
1269 }
1270 
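/* Unload and free all posted Rx buffers and mark the ring drained, so the
 * next temac_init() repopulates it from scratch. */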
1271 static void
1272 temac_rxdrain(struct temac_softc *sc)
1273 {
1274 	struct temac_rxsoft 	*rxs;
1275 	int 			i;
1276 
1277 	for (i = 0; i < TEMAC_NRXDESC; i++) {
1278 		rxs = &sc->sc_rxsoft[i];
1279 
1280 		if (rxs->rxs_mbuf != NULL) {
1281 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmap);
1282 			m_freem(rxs->rxs_mbuf);
1283 			rxs->rxs_mbuf = NULL;
1284 		}
1285 	}
1286 
1287 	sc->sc_rx_drained = 1;
1288 }
1289 
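/* Start the Tx CDMAC channel at the oldest pending descriptor, but only when
 * the channel is idle and at least one packet is queued. */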
1290 static void
1291 temac_txkick(struct temac_softc *sc)
1292 {
1293 	if (sc->sc_txsoft[sc->sc_txsreap].txs_mbuf != NULL &&
1294 	    sc->sc_txbusy == 0) {
1295 		cdmac_tx_start(sc, sc->sc_cdaddr + TEMAC_TXDOFF(sc->sc_txreap));
1296 		sc->sc_txbusy = 1;
1297 	}
1298 }
1299