xref: /netbsd-src/sys/arch/evbppc/virtex/dev/if_temac.c (revision 8b0f9554ff8762542c4defc4f70e1eb76fb508fa)
1 /* 	$NetBSD: if_temac.c,v 1.2 2007/03/04 05:59:46 christos Exp $ */
2 
3 /*
4  * Copyright (c) 2006 Jachym Holecek
5  * All rights reserved.
6  *
7  * Written for DFC Design, s.r.o.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  *
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Driver for Xilinx LocalLink TEMAC as wired on the GSRD platform.
34  *
35  * TODO:
36  * 	- Optimize
37  * 	- Checksum offload
38  * 	- Address filters
39  * 	- Support jumbo frames
40  */
41 
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0, "$NetBSD: if_temac.c,v 1.2 2007/03/04 05:59:46 christos Exp $");
44 
45 #include "bpfilter.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/mbuf.h>
50 #include <sys/kernel.h>
51 #include <sys/socket.h>
52 #include <sys/ioctl.h>
53 #include <sys/device.h>
54 
55 #include <uvm/uvm_extern.h>
56 
57 #include <net/if.h>
58 #include <net/if_dl.h>
59 #include <net/if_media.h>
60 #include <net/if_ether.h>
61 
62 #if NBPFILTER > 0
63 #include <net/bpf.h>
64 #endif
65 
66 #include <machine/bus.h>
67 
68 #include <evbppc/virtex/idcr.h>
69 #include <evbppc/virtex/dev/xcvbusvar.h>
70 #include <evbppc/virtex/dev/cdmacreg.h>
71 #include <evbppc/virtex/dev/temacreg.h>
72 #include <evbppc/virtex/dev/temacvar.h>
73 
74 #include <dev/mii/miivar.h>
75 
76 
77 /* This is outside of TEMAC's DCR window, we have to hardcode it... */
78 #define DCR_ETH_BASE 		0x0030
79 
80 #define	TEMAC_REGDEBUG 		0
81 #define	TEMAC_RXDEBUG 		0
82 #define	TEMAC_TXDEBUG 		0
83 
84 #if TEMAC_RXDEBUG > 0 || TEMAC_TXDEBUG > 0
85 #define	TEMAC_DEBUG 		1
86 #else
87 #define	TEMAC_DEBUG 		0
88 #endif
89 
90 #if TEMAC_REGDEBUG > 0
91 #define	TRACEREG(arg) 		printf arg
92 #else
93 #define	TRACEREG(arg) 		/* nop */
94 #endif
95 
96 /* DMA control chains take up one (16KB) page. */
97 #define TEMAC_NTXDESC 		256
98 #define TEMAC_NRXDESC 		256
99 
100 #define TEMAC_TXQLEN 		64 	/* Software Tx queue length */
101 #define TEMAC_NTXSEG 		16 	/* Maximum Tx segments per packet */
102 
103 #define TEMAC_NRXSEG 		1 	/* Maximum Rx segments per packet */
104 #define TEMAC_RXPERIOD 		1 	/* Interrupt every N descriptors. */
105 #define TEMAC_RXTIMO_HZ 	100 	/* Rx reaper frequency */
106 
107 /* Next Tx descriptor and descriptor's offset WRT sc_cdaddr. */
108 #define TEMAC_TXSINC(n, i) 	(((n) + TEMAC_TXQLEN + (i)) % TEMAC_TXQLEN)
109 #define TEMAC_TXINC(n, i) 	(((n) + TEMAC_NTXDESC + (i)) % TEMAC_NTXDESC)
110 
111 #define TEMAC_TXSNEXT(n) 	TEMAC_TXSINC((n), 1)
112 #define TEMAC_TXNEXT(n) 	TEMAC_TXINC((n), 1)
113 #define TEMAC_TXDOFF(n) 	(offsetof(struct temac_control, cd_txdesc) + \
114 				 (n) * sizeof(struct cdmac_descr))
115 
116 /* Next Rx descriptor and descriptor's offset WRT sc_cdaddr. */
117 #define TEMAC_RXINC(n, i) 	(((n) + TEMAC_NRXDESC + (i)) % TEMAC_NRXDESC)
118 #define TEMAC_RXNEXT(n) 	TEMAC_RXINC((n), 1)
119 #define TEMAC_RXDOFF(n) 	(offsetof(struct temac_control, cd_rxdesc) + \
120 				 (n) * sizeof(struct cdmac_descr))
121 #define TEMAC_ISINTR(i) 	(((i) % TEMAC_RXPERIOD) == 0)
122 #define TEMAC_ISLAST(i) 	((i) == (TEMAC_NRXDESC - 1))
123 
124 
/*
 * DMA control block: Tx and Rx descriptor rings, allocated as a single
 * chunk of coherent memory (fits in one 16KB page, see comment above
 * TEMAC_NTXDESC/TEMAC_NRXDESC).
 */
struct temac_control {
	struct cdmac_descr 	cd_txdesc[TEMAC_NTXDESC]; 	/* Tx descriptor ring */
	struct cdmac_descr 	cd_rxdesc[TEMAC_NRXDESC]; 	/* Rx descriptor ring */
};
129 
/* Software state of one queued transmit packet. */
struct temac_txsoft {
	bus_dmamap_t 		txs_dmap; 	/* DMA map of the mbuf chain */
	struct mbuf 		*txs_mbuf; 	/* queued mbuf, NULL when slot free */
	int 			txs_last; 	/* nonzero: last packet of a Tx batch */
};
135 
/* Software state of one receive buffer (one per Rx descriptor). */
struct temac_rxsoft {
	bus_dmamap_t 		rxs_dmap; 	/* DMA map of the cluster mbuf */
	struct mbuf 		*rxs_mbuf; 	/* receive buffer, NULL when drained */
};
140 
/* Per-device driver state. */
struct temac_softc {
	struct device 		sc_dev; 	/* generic device; must be first */
	struct ethercom 	sc_ec; 		/* ethernet common state */
#define sc_if 			sc_ec.ec_if

	/* Peripheral registers */
	bus_space_tag_t 	sc_iot;
	bus_space_handle_t 	sc_ioh;

	/* CDMAC channel registers */
	bus_space_tag_t 	sc_dma_rxt;
	bus_space_handle_t 	sc_dma_rxh; 	/* Rx channel */
	bus_space_handle_t 	sc_dma_rsh; 	/* Rx status */

	bus_space_tag_t 	sc_dma_txt;
	bus_space_handle_t 	sc_dma_txh; 	/* Tx channel */
	bus_space_handle_t 	sc_dma_tsh; 	/* Tx status */

	/* Per-packet software state, parallel to the descriptor rings. */
	struct temac_txsoft 	sc_txsoft[TEMAC_TXQLEN];
	struct temac_rxsoft 	sc_rxsoft[TEMAC_NRXDESC];

	struct callout 		sc_rx_timo; 	/* Rx reaper callout */
	struct callout 		sc_mii_tick; 	/* MII autonegotiation tick */
	struct mii_data 	sc_mii;

	/* Coherent DMA memory holding struct temac_control. */
	bus_dmamap_t 		sc_control_dmap;
#define sc_cdaddr 		sc_control_dmap->dm_segs[0].ds_addr

	struct temac_control 	*sc_control_data;
#define sc_rxdescs 		sc_control_data->cd_rxdesc
#define sc_txdescs 		sc_control_data->cd_txdesc

	int 			sc_txbusy; 	/* Tx CDMAC channel running */

	/* Tx descriptor ring bookkeeping (indices into sc_txdescs). */
	int 			sc_txfree;
	int 			sc_txcur;
	int 			sc_txreap;

	int 			sc_rxreap; 	/* next Rx descriptor to reap */

	/* Tx software queue bookkeeping (indices into sc_txsoft). */
	int 			sc_txsfree;
	int 			sc_txscur;
	int 			sc_txsreap;

	int 			sc_dead; 	/* Rx/Tx DMA error (fatal) */
	int 			sc_rx_drained; 	/* Rx buffers not allocated */

	int 			sc_rx_chan;
	int 			sc_tx_chan;

	void 			*sc_sdhook; 	/* shutdown hook */
	void 			*sc_rx_ih; 	/* Rx interrupt handle */
	void 			*sc_tx_ih; 	/* Tx interrupt handle */

	bus_dma_tag_t 		sc_dmat;
};
197 
198 /* Device interface. */
199 static void 	temac_attach(struct device *, struct device *, void *);
200 
201 /* Ifnet interface. */
202 static int 	temac_init(struct ifnet *);
203 static int 	temac_ioctl(struct ifnet *, u_long, void *);
204 static void 	temac_start(struct ifnet *);
205 static void 	temac_stop(struct ifnet *, int);
206 
207 /* Media management. */
208 static int	temac_mediachange(struct ifnet *);
209 static void	temac_mediastatus(struct ifnet *, struct ifmediareq *);
210 static int	temac_mii_readreg(struct device *, int, int);
211 static void	temac_mii_statchg(struct device *);
212 static void	temac_mii_tick(void *);
213 static void	temac_mii_writereg(struct device *, int, int, int);
214 
215 /* Indirect hooks. */
216 static void 	temac_shutdown(void *);
217 static void 	temac_rx_intr(void *);
218 static void 	temac_tx_intr(void *);
219 
220 /* Tools. */
221 static inline void 	temac_rxcdsync(struct temac_softc *, int, int, int);
222 static inline void 	temac_txcdsync(struct temac_softc *, int, int, int);
223 static void 		temac_txreap(struct temac_softc *);
224 static void 		temac_rxreap(struct temac_softc *);
225 static int 		temac_rxalloc(struct temac_softc *, int, int);
226 static void 		temac_rxtimo(void *);
227 static void 		temac_rxdrain(struct temac_softc *);
228 static void 		temac_reset(struct temac_softc *);
229 static void 		temac_txkick(struct temac_softc *);
230 
231 /* Register access. */
232 static inline void 	gmi_write_8(uint32_t, uint32_t, uint32_t);
233 static inline void 	gmi_write_4(uint32_t, uint32_t);
234 static inline void 	gmi_read_8(uint32_t, uint32_t *, uint32_t *);
235 static inline uint32_t 	gmi_read_4(uint32_t);
236 static inline void 	hif_wait_stat(uint32_t);
237 
238 #define cdmac_rx_stat(sc) \
239     bus_space_read_4((sc)->sc_dma_rxt, (sc)->sc_dma_rsh, 0 /* XXX hack */)
240 
241 #define cdmac_rx_reset(sc) \
242     bus_space_write_4((sc)->sc_dma_rxt, (sc)->sc_dma_rsh, 0, CDMAC_STAT_RESET)
243 
244 #define cdmac_rx_start(sc, val) \
245     bus_space_write_4((sc)->sc_dma_rxt, (sc)->sc_dma_rxh, CDMAC_CURDESC, (val))
246 
247 #define cdmac_tx_stat(sc) \
248     bus_space_read_4((sc)->sc_dma_txt, (sc)->sc_dma_tsh, 0 /* XXX hack */)
249 
250 #define cdmac_tx_reset(sc) \
251     bus_space_write_4((sc)->sc_dma_txt, (sc)->sc_dma_tsh, 0, CDMAC_STAT_RESET)
252 
253 #define cdmac_tx_start(sc, val) \
254     bus_space_write_4((sc)->sc_dma_txt, (sc)->sc_dma_txh, CDMAC_CURDESC, (val))
255 
256 
257 CFATTACH_DECL(temac, sizeof(struct temac_softc),
258     xcvbus_child_match, temac_attach, NULL, NULL);
259 
260 
261 /*
262  * Private bus utilities.
263  */
264 static inline void
265 hif_wait_stat(uint32_t mask)
266 {
267 	int 			i = 0;
268 
269 	while (mask != (mfidcr(IDCR_HIF_STAT) & mask)) {
270 		if (i++ > 100) {
271 			printf("%s: timeout waiting for 0x%08x\n",
272 			    __func__, mask);
273 			break;
274 		}
275 		delay(5);
276 	}
277 
278 	TRACEREG(("%s: stat %#08x loops %d\n", __func__, mask, i));
279 }
280 
/*
 * Write the low 32 bits of a GMI register: stage the value in HIF ARG0,
 * kick off an indirect write cycle, and wait for it to complete.
 * The IDCR write order (ARG0 before CTRL) is mandated by the hardware.
 */
static inline void
gmi_write_4(uint32_t addr, uint32_t lo)
{
	mtidcr(IDCR_HIF_ARG0, lo);
	mtidcr(IDCR_HIF_CTRL, (addr & HIF_CTRL_GMIADDR) | HIF_CTRL_WRITE);
	hif_wait_stat(HIF_STAT_GMIWR);

	TRACEREG(("%s: %#08x <- %#08x\n", __func__, addr, lo));
}
290 
/*
 * 64-bit GMI register write: stage the high word in ARG1 first, then
 * let gmi_write_4() stage the low word and trigger the transfer.
 */
static inline void
gmi_write_8(uint32_t addr, uint32_t lo, uint32_t hi)
{
	mtidcr(IDCR_HIF_ARG1, hi);
	gmi_write_4(addr, lo);
}
297 
/*
 * 64-bit GMI register read: gmi_read_4() performs the indirect read
 * cycle and returns the low word; the high word is then available
 * in ARG1.
 */
static inline void
gmi_read_8(uint32_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = gmi_read_4(addr);
	*hi = mfidcr(IDCR_HIF_ARG1);
}
304 
/*
 * Read the low 32 bits of a GMI register: trigger an indirect read
 * cycle, wait for completion, and fetch the result from ARG0.
 */
static inline uint32_t
gmi_read_4(uint32_t addr)
{
	uint32_t 		res;

	mtidcr(IDCR_HIF_CTRL, addr & HIF_CTRL_GMIADDR);
	hif_wait_stat(HIF_STAT_GMIRR);

	res = mfidcr(IDCR_HIF_ARG0);
	TRACEREG(("%s:  %#08x -> %#08x\n", __func__, addr, res));
	return (res);
}
317 
318 /*
319  * Generic device.
320  */
321 static void
322 temac_attach(struct device *parent, struct device *self, void *aux)
323 {
324 	struct xcvbus_attach_args *vaa = aux;
325 	struct ll_dmac 		*rx = vaa->vaa_rx_dmac;
326 	struct ll_dmac 		*tx = vaa->vaa_tx_dmac;
327 	struct temac_softc 	*sc = (struct temac_softc *)self;
328 	struct ifnet 		*ifp = &sc->sc_if;
329 	struct mii_data 	*mii = &sc->sc_mii;
330 	uint8_t 		enaddr[ETHER_ADDR_LEN];
331 	bus_dma_segment_t 	seg;
332 	int 			error, nseg, i;
333 
334 	printf(": TEMAC\n"); 	/* XXX will be LL_TEMAC, PLB_TEMAC */
335 
336 	KASSERT(rx);
337 	KASSERT(tx);
338 
339 	sc->sc_dmat = vaa->vaa_dmat;
340 	sc->sc_dead = 0;
341 	sc->sc_rx_drained = 1;
342 	sc->sc_txbusy = 0;
343 	sc->sc_iot = vaa->vaa_iot;
344 	sc->sc_dma_rxt = rx->dmac_iot;
345 	sc->sc_dma_txt = tx->dmac_iot;
346 
347 	/*
348 	 * Map HIF and receive/transmit dmac registers.
349 	 */
350 	if ((error = bus_space_map(vaa->vaa_iot, vaa->vaa_addr, TEMAC_SIZE, 0,
351 	    &sc->sc_ioh)) != 0) {
352 		printf("%s: could not map registers\n", device_xname(self));
353 		goto fail_0;
354 	}
355 
356 	if ((error = bus_space_map(sc->sc_dma_rxt, rx->dmac_ctrl_addr,
357 	    CDMAC_CTRL_SIZE, 0, &sc->sc_dma_rxh)) != 0) {
358 		printf("%s: could not map Rx control registers\n",
359 		    device_xname(self));
360 		goto fail_0;
361 	}
362 	if ((error = bus_space_map(sc->sc_dma_rxt, rx->dmac_stat_addr,
363 	    CDMAC_STAT_SIZE, 0, &sc->sc_dma_rsh)) != 0) {
364 		printf("%s: could not map Rx status register\n",
365 		    device_xname(self));
366 		goto fail_0;
367 	}
368 
369 	if ((error = bus_space_map(sc->sc_dma_txt, tx->dmac_ctrl_addr,
370 	    CDMAC_CTRL_SIZE, 0, &sc->sc_dma_txh)) != 0) {
371 		printf("%s: could not map Tx control registers\n",
372 		    device_xname(self));
373 		goto fail_0;
374 	}
375 	if ((error = bus_space_map(sc->sc_dma_txt, tx->dmac_stat_addr,
376 	    CDMAC_STAT_SIZE, 0, &sc->sc_dma_tsh)) != 0) {
377 		printf("%s: could not map Tx status register\n",
378 		    device_xname(self));
379 		goto fail_0;
380 	}
381 
382 	/*
383 	 * Allocate and initialize DMA control chains.
384 	 */
385 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
386 	    sizeof(struct temac_control), 8, 0, &seg, 1, &nseg, 0)) != 0) {
387 	    	printf("%s: could not allocate control data\n",
388 	    	    sc->sc_dev.dv_xname);
389 		goto fail_0;
390 	}
391 
392 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
393 	    sizeof(struct temac_control),
394 	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
395 	    	printf("%s: could not map control data\n",
396 	    	    sc->sc_dev.dv_xname);
397 		goto fail_1;
398 	}
399 
400 	if ((error = bus_dmamap_create(sc->sc_dmat,
401 	    sizeof(struct temac_control), 1,
402 	    sizeof(struct temac_control), 0, 0, &sc->sc_control_dmap)) != 0) {
403 	    	printf("%s: could not create control data DMA map\n",
404 	    	    sc->sc_dev.dv_xname);
405 		goto fail_2;
406 	}
407 
408 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_control_dmap,
409 	    sc->sc_control_data, sizeof(struct temac_control), NULL, 0)) != 0) {
410 	    	printf("%s: could not load control data DMA map\n",
411 	    	    sc->sc_dev.dv_xname);
412 		goto fail_3;
413 	}
414 
415 	/*
416 	 * Link descriptor chains.
417 	 */
418 	memset(sc->sc_control_data, 0, sizeof(struct temac_control));
419 
420 	for (i = 0; i < TEMAC_NTXDESC; i++) {
421 		sc->sc_txdescs[i].desc_next = sc->sc_cdaddr +
422 		    TEMAC_TXDOFF(TEMAC_TXNEXT(i));
423 		sc->sc_txdescs[i].desc_stat = CDMAC_STAT_DONE;
424 	}
425 	for (i = 0; i < TEMAC_NRXDESC; i++) {
426 		sc->sc_rxdescs[i].desc_next = sc->sc_cdaddr +
427 		    TEMAC_RXDOFF(TEMAC_RXNEXT(i));
428 		sc->sc_txdescs[i].desc_stat = CDMAC_STAT_DONE;
429 	}
430 
431 	bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap, 0,
432 	    sizeof(struct temac_control),
433 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
434 
435 	/*
436 	 * Initialize software state for transmit/receive jobs.
437 	 */
438 	for (i = 0; i < TEMAC_TXQLEN; i++) {
439 		if ((error = bus_dmamap_create(sc->sc_dmat,
440 		    ETHER_MAX_LEN_JUMBO, TEMAC_NTXSEG, ETHER_MAX_LEN_JUMBO,
441 		    0, 0, &sc->sc_txsoft[i].txs_dmap)) != 0) {
442 		    	printf("%s: could not create Tx DMA map %d\n",
443 		    	    sc->sc_dev.dv_xname, i);
444 			goto fail_4;
445 		}
446 		sc->sc_txsoft[i].txs_mbuf = NULL;
447 		sc->sc_txsoft[i].txs_last = 0;
448 	}
449 
450 	for (i = 0; i < TEMAC_NRXDESC; i++) {
451 		if ((error = bus_dmamap_create(sc->sc_dmat,
452 		    MCLBYTES, TEMAC_NRXSEG, MCLBYTES, 0, 0,
453 		    &sc->sc_rxsoft[i].rxs_dmap)) != 0) {
454 		    	printf("%s: could not create Rx DMA map %d\n",
455 		    	    sc->sc_dev.dv_xname, i);
456 			goto fail_5;
457 		}
458 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
459 	}
460 
461 	/*
462 	 * Setup transfer interrupt handlers.
463 	 */
464 	error = ENOMEM;
465 
466 	sc->sc_rx_ih = ll_dmac_intr_establish(rx->dmac_chan,
467 	    temac_rx_intr, sc);
468 	if (sc->sc_rx_ih == NULL) {
469 		printf("%s: could not establish Rx interrupt\n",
470 		    device_xname(self));
471 		goto fail_5;
472 	}
473 
474 	sc->sc_tx_ih = ll_dmac_intr_establish(tx->dmac_chan,
475 	    temac_tx_intr, sc);
476 	if (sc->sc_tx_ih == NULL) {
477 		printf("%s: could not establish Tx interrupt\n",
478 		    device_xname(self));
479 		goto fail_6;
480 	}
481 
482 	/* XXXFreza: faked, should read unicast address filter. */
483 	enaddr[0] = 0x00;
484 	enaddr[1] = 0x11;
485 	enaddr[2] = 0x17;
486 	enaddr[3] = 0xff;
487 	enaddr[4] = 0xff;
488 	enaddr[5] = 0x01;
489 
490 	/*
491 	 * Initialize the TEMAC.
492 	 */
493 	temac_reset(sc);
494 
495 	/* Configure MDIO link. */
496 	gmi_write_4(TEMAC_GMI_MGMTCF, GMI_MGMT_CLKDIV_100MHz | GMI_MGMT_MDIO);
497 
498 	/* Initialize PHY. */
499 	mii->mii_ifp = ifp;
500 	mii->mii_readreg = temac_mii_readreg;
501 	mii->mii_writereg = temac_mii_writereg;
502 	mii->mii_statchg = temac_mii_statchg;
503 	ifmedia_init(&mii->mii_media, 0, temac_mediachange,
504 	    temac_mediastatus);
505 
506 	mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
507 	    MII_OFFSET_ANY, 0);
508 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
509 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
510 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
511 	} else {
512 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
513 	}
514 
515 	/* Hold PHY in reset. */
516 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET, TEMAC_RESET_PHY);
517 
518 	/* Reset EMAC. */
519 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET,
520 	    TEMAC_RESET_EMAC);
521 	delay(10000);
522 
523 	/* Reset peripheral, awakes PHY and EMAC. */
524 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET,
525 	    TEMAC_RESET_PERIPH);
526 	delay(40000);
527 
528 	/* (Re-)Configure MDIO link. */
529 	gmi_write_4(TEMAC_GMI_MGMTCF, GMI_MGMT_CLKDIV_100MHz | GMI_MGMT_MDIO);
530 
531 	/*
532 	 * Hook up with network stack.
533 	 */
534 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
535 	ifp->if_softc = sc;
536 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
537 	ifp->if_ioctl = temac_ioctl;
538 	ifp->if_start = temac_start;
539 	ifp->if_init = temac_init;
540 	ifp->if_stop = temac_stop;
541 	ifp->if_watchdog = NULL;
542 	IFQ_SET_READY(&ifp->if_snd);
543 	IFQ_SET_MAXLEN(&ifp->if_snd, TEMAC_TXQLEN);
544 
545 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
546 
547 	if_attach(ifp);
548 	ether_ifattach(ifp, enaddr);
549 
550 	sc->sc_sdhook = shutdownhook_establish(temac_shutdown, sc);
551 	if (sc->sc_sdhook == NULL)
552 		printf("%s: WARNING: unable to establish shutdown hook\n",
553 		    device_xname(self));
554 
555 	callout_setfunc(&sc->sc_mii_tick, temac_mii_tick, sc);
556 	callout_setfunc(&sc->sc_rx_timo, temac_rxtimo, sc);
557 
558 	return ;
559 
560  fail_6:
561 	ll_dmac_intr_disestablish(rx->dmac_chan, sc->sc_rx_ih);
562 	i = TEMAC_NRXDESC;
563  fail_5:
564  	for (--i; i >= 0; i--)
565  		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxsoft[i].rxs_dmap);
566 	i = TEMAC_TXQLEN;
567  fail_4:
568  	for (--i; i >= 0; i--)
569  		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txsoft[i].txs_dmap);
570  fail_3:
571 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_control_dmap);
572  fail_2:
573 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
574 	    sizeof(struct temac_control));
575  fail_1:
576 	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
577  fail_0:
578  	printf("%s: error = %d\n", device_xname(self), error);
579 }
580 
581 /*
582  * Network device.
583  */
/*
 * Bring the interface up: reset the DMA channels, program current media,
 * enable the EMAC engines, reset software ring state and (re)populate
 * the Rx ring if it was drained.  Returns 0 on success or an errno if
 * Rx buffer allocation fails.
 */
static int
temac_init(struct ifnet *ifp)
{
	struct temac_softc 	*sc = (struct temac_softc *)ifp->if_softc;
	uint32_t 		rcr, tcr;
	int 			i, error;

	/* Reset DMA channels. */
	cdmac_tx_reset(sc);
	cdmac_rx_reset(sc);

	/* Set current media. */
	mii_mediachg(&sc->sc_mii);
	callout_schedule(&sc->sc_mii_tick, hz);

	/* Enable EMAC engine.  Jumbo frames and FCS passthrough stay off. */
	rcr = (gmi_read_4(TEMAC_GMI_RXCF1) | GMI_RX_ENABLE) &
	    ~(GMI_RX_JUMBO | GMI_RX_FCS);
	gmi_write_4(TEMAC_GMI_RXCF1, rcr);

	tcr = (gmi_read_4(TEMAC_GMI_TXCF) | GMI_TX_ENABLE) &
	    ~(GMI_TX_JUMBO | GMI_TX_FCS);
	gmi_write_4(TEMAC_GMI_TXCF, tcr);

	/* XXXFreza: Force promiscuous mode, for now. */
	gmi_write_4(TEMAC_GMI_AFM, GMI_AFM_PROMISC);
	ifp->if_flags |= IFF_PROMISC;

	/* Rx/Tx queues are drained -- either from attach() or stop(). */
	sc->sc_txsfree = TEMAC_TXQLEN;
	sc->sc_txsreap = 0;
	sc->sc_txscur = 0;

	sc->sc_txfree = TEMAC_NTXDESC;
	sc->sc_txreap = 0;
	sc->sc_txcur = 0;

	sc->sc_rxreap = 0;

	/* Allocate and map receive buffers. */
	if (sc->sc_rx_drained) {
		for (i = 0; i < TEMAC_NRXDESC; i++) {
			if ((error = temac_rxalloc(sc, i, 1)) != 0) {
				printf("%s: failed to allocate Rx "
				    "descriptor %d\n",
				    sc->sc_dev.dv_xname, i);

				temac_rxdrain(sc);
				return (error);
			}
		}
		sc->sc_rx_drained = 0;

		/* Hand the whole ring to the Rx CDMAC channel. */
		temac_rxcdsync(sc, 0, TEMAC_NRXDESC,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		cdmac_rx_start(sc, sc->sc_cdaddr + TEMAC_RXDOFF(0));
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}
647 
648 static int
649 temac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
650 {
651 	struct temac_softc 	*sc = (struct temac_softc *)ifp->if_softc;
652 	struct ifreq 		*ifr = (struct ifreq *)data;
653 	int 			s, ret;
654 
655 	s = splnet();
656 	if (sc->sc_dead) {
657 		ret = EIO;
658 	} else
659 		switch (cmd) {
660 		case SIOCSIFMEDIA:
661 		case SIOCGIFMEDIA:
662 			ret = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media,
663 			    cmd);
664 			break;
665 
666 		default:
667 			ret = ether_ioctl(ifp, cmd, data);
668 			break;
669 		}
670 
671 	splx(s);
672 	return (ret);
673 }
674 
/*
 * Transmit dequeue: map mbufs from the interface send queue into the
 * Tx descriptor ring until the queue drains or we run out of software
 * slots / descriptors, then kick the CDMAC channel for the whole batch.
 * One interrupt is scheduled per batch (on its last descriptor).
 */
static void
temac_start(struct ifnet *ifp)
{
	struct temac_softc 	*sc = (struct temac_softc *)ifp->if_softc;
	struct temac_txsoft 	*txs;
	struct mbuf 		*m;
	bus_dmamap_t 		dmap;
	int 			error, head, nsegs, i;

	nsegs = 0;
	head = sc->sc_txcur;
	txs = NULL; 		/* gcc */

	if (sc->sc_dead)
		return;

	KASSERT(sc->sc_txfree >= 0);
	KASSERT(sc->sc_txsfree >= 0);

	/*
	 * Push mbufs into descriptor chain until we drain the interface
	 * queue or run out of descriptors. We'll mark the first segment
	 * as "done" in hope that we might put CDMAC interrupt above IPL_NET
	 * and have it start jobs & mark packets for GC preemtively for
	 * us -- creativity due to limitations in CDMAC transfer engine
	 * (it really consumes lists, not circular queues, AFAICS).
	 *
	 * We schedule one interrupt per Tx batch.
	 */
	while (1) {
		/* Peek first; we only dequeue once resources are secured. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* No free software slot: back-pressure the stack. */
		if (sc->sc_txsfree == 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		txs = &sc->sc_txsoft[sc->sc_txscur];
		dmap = txs->txs_dmap;

		/* NOTE(review): these indicate slot-reuse bugs; debug aids. */
		if (txs->txs_mbuf != NULL)
			printf("FOO\n");
		if (txs->txs_last)
			printf("BAR\n");

		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT)) != 0) {
		    	if (error == EFBIG) {
		    		/* Too many segments: drop the packet. */
		    		printf("%s: Tx consumes too many segments, "
		    		    "dropped\n", sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m);
				m_freem(m);
				continue;
		    	} else {
		    		/* Transient shortage: retry later. */
		    		printf("%s: Tx stall due to resource "
		    		    "shortage\n", sc->sc_dev.dv_xname);
		    		break;
			}
		}

		/*
		 * If we're short on DMA descriptors, notify upper layers
		 * and leave this packet for later.
		 */
		if (dmap->dm_nsegs > sc->sc_txfree) {
			bus_dmamap_unload(sc->sc_dmat, dmap);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		txs->txs_mbuf = m;

		/*
		 * Map the packet into descriptor chain. XXX We'll want
		 * to fill checksum offload commands here.
		 *
		 * We would be in a race if we weren't blocking CDMAC intr
		 * at this point -- we need to be locked against txreap()
		 * because of dmasync ops.
		 */

		temac_txcdsync(sc, sc->sc_txcur, dmap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* One descriptor per DMA segment; SOP/EOP bracket a packet. */
		for (i = 0; i < dmap->dm_nsegs; i++) {
			sc->sc_txdescs[sc->sc_txcur].desc_addr =
			    dmap->dm_segs[i].ds_addr;
			sc->sc_txdescs[sc->sc_txcur].desc_size =
			    dmap->dm_segs[i].ds_len;
			sc->sc_txdescs[sc->sc_txcur].desc_stat =
			    (i == 0 			? CDMAC_STAT_SOP : 0) |
			    (i == (dmap->dm_nsegs - 1) 	? CDMAC_STAT_EOP : 0);

			sc->sc_txcur = TEMAC_TXNEXT(sc->sc_txcur);
		}

		sc->sc_txfree -= dmap->dm_nsegs;
		nsegs += dmap->dm_nsegs;

		sc->sc_txscur = TEMAC_TXSNEXT(sc->sc_txscur);
		sc->sc_txsfree--;
	}

	/* Get data running if we queued any. */
	if (nsegs > 0) {
		int 		tail = TEMAC_TXINC(sc->sc_txcur, -1);

		/* Mark the last packet in this job. */
		txs->txs_last = 1;

		/* Mark the last descriptor in this job. */
		sc->sc_txdescs[tail].desc_stat |= CDMAC_STAT_STOP |
		    CDMAC_STAT_INTR;
		temac_txcdsync(sc, head, nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		temac_txkick(sc);
#if TEMAC_TXDEBUG > 0
		printf("%s: start:  txcur  %03d -> %03d, nseg %03d\n",
		    sc->sc_dev.dv_xname, head, sc->sc_txcur, nsegs);
#endif
	}
}
804 
805 static void
806 temac_stop(struct ifnet *ifp, int disable)
807 {
808 	struct temac_softc 	*sc = (struct temac_softc *)ifp->if_softc;
809 	struct temac_txsoft 	*txs;
810 	int 			i;
811 
812 #if TEMAC_DEBUG > 0
813 	printf("%s: stop\n", device_xname(&sc->sc_dev));
814 #endif
815 
816 	/* Down the MII. */
817 	callout_stop(&sc->sc_mii_tick);
818 	mii_down(&sc->sc_mii);
819 
820 	/* Stop the engine. */
821 	temac_reset(sc);
822 
823 	/* Drain buffers queues (unconditionally). */
824 	temac_rxdrain(sc);
825 
826 	for (i = 0; i < TEMAC_TXQLEN; i++) {
827 		txs = &sc->sc_txsoft[i];
828 
829 		if (txs->txs_mbuf != NULL) {
830 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmap);
831 			m_freem(txs->txs_mbuf);
832 			txs->txs_mbuf = NULL;
833 			txs->txs_last = 0;
834 		}
835 	}
836 	sc->sc_txbusy = 0;
837 
838 	/* Acknowledge we're down. */
839 	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
840 }
841 
842 /*
843  * Media management.
844  */
845 static int
846 temac_mediachange(struct ifnet *ifp)
847 {
848 	struct temac_softc 	*sc = (struct temac_softc *)ifp->if_softc;
849 
850 	if (ifp->if_flags & IFF_UP)
851 		mii_mediachg(&sc->sc_mii);
852 	return (0);
853 }
854 
855 static void
856 temac_mediastatus(struct ifnet *ifp, struct ifmediareq *imr)
857 {
858 	struct temac_softc 	*sc = (struct temac_softc *)ifp->if_softc;
859 
860 	mii_pollstat(&sc->sc_mii);
861 
862 	imr->ifm_status = sc->sc_mii.mii_media_status;
863 	imr->ifm_active = sc->sc_mii.mii_media_active;
864 }
865 
/*
 * MII read: latch (phy, reg) into ARG0, trigger an indirect MII address
 * cycle, and wait for the read to complete; the register value comes
 * back in ARG0.  The IDCR sequence is fixed by the hardware.
 */
static int
temac_mii_readreg(struct device *self, int phy, int reg)
{
	mtidcr(IDCR_HIF_ARG0, (phy << 5) | reg);
	mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_ADDR);
	hif_wait_stat(HIF_STAT_MIIRR);

	return (int)mfidcr(IDCR_HIF_ARG0);
}
875 
/*
 * MII write: stage the value via the MII write-value register, then
 * latch the (phy, reg) address to trigger the transfer, and wait for
 * completion.  The two-step IDCR sequence is fixed by the hardware.
 */
static void
temac_mii_writereg(struct device *self, int phy, int reg, int val)
{
	mtidcr(IDCR_HIF_ARG0, val);
	mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_WRVAL | HIF_CTRL_WRITE);
	mtidcr(IDCR_HIF_ARG0, (phy << 5) | reg);
	mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_ADDR | HIF_CTRL_WRITE);
	hif_wait_stat(HIF_STAT_MIIWR);
}
885 
/*
 * MII status-change callback: propagate the negotiated duplex and
 * speed from the PHY into the EMAC Rx/Tx configuration and MMC
 * registers (read-modify-write).
 */
static void
temac_mii_statchg(struct device *self)
{
	struct temac_softc 	*sc = (struct temac_softc *)self;
	uint32_t 		rcf, tcf, mmc;

	/* Full/half duplex link. */
	rcf = gmi_read_4(TEMAC_GMI_RXCF1);
	tcf = gmi_read_4(TEMAC_GMI_TXCF);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		gmi_write_4(TEMAC_GMI_RXCF1, rcf & ~GMI_RX_HDX);
		gmi_write_4(TEMAC_GMI_TXCF, tcf & ~GMI_TX_HDX);
	} else {
		gmi_write_4(TEMAC_GMI_RXCF1, rcf | GMI_RX_HDX);
		gmi_write_4(TEMAC_GMI_TXCF, tcf | GMI_TX_HDX);
	}

	/* Link speed. */
	mmc = gmi_read_4(TEMAC_GMI_MMC) & ~GMI_MMC_SPEED_MASK;

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_10_T:
		/*
		 * XXXFreza: the GMAC is not happy with 10Mbit ethernet,
		 * although the documentation claims it's supported. Maybe
		 * it's just my equipment...
		 */
		mmc |= GMI_MMC_SPEED_10;
		break;
	case IFM_100_TX:
		mmc |= GMI_MMC_SPEED_100;
		break;
	case IFM_1000_T:
		mmc |= GMI_MMC_SPEED_1000;
		break;
	}

	gmi_write_4(TEMAC_GMI_MMC, mmc);
}
926 
927 static void
928 temac_mii_tick(void *arg)
929 {
930 	struct temac_softc 	*sc = (struct temac_softc *)arg;
931 	int 			s;
932 
933 	if ((sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
934 		return ;
935 
936 	s = splnet();
937 	mii_tick(&sc->sc_mii);
938 	splx(s);
939 
940 	callout_schedule(&sc->sc_mii_tick, hz);
941 }
942 
943 /*
944  * External hooks.
945  */
/*
 * Shutdown hook: quiesce the hardware so it stops DMA before the
 * system reboots.
 */
static void
temac_shutdown(void *arg)
{
	temac_reset((struct temac_softc *)arg);
}
953 
954 static void
955 temac_tx_intr(void *arg)
956 {
957 	struct temac_softc 	*sc = (struct temac_softc *)arg;
958 	uint32_t 		stat;
959 
960 	/* XXX: We may need to splnet() here if cdmac(4) changes. */
961 
962 	if ((stat = cdmac_tx_stat(sc)) & CDMAC_STAT_ERROR) {
963 		printf("%s: transmit DMA is toast (%#08x), halted!\n",
964 		    sc->sc_dev.dv_xname, stat);
965 
966 		/* XXXFreza: how to signal this upstream? */
967 		temac_stop(&sc->sc_if, 1);
968 		sc->sc_dead = 1;
969 	}
970 
971 #if TEMAC_DEBUG > 0
972 	printf("%s: tx intr 0x%08x\n", device_xname(&sc->sc_dev), stat);
973 #endif
974 	temac_txreap(sc);
975 }
976 
977 static void
978 temac_rx_intr(void *arg)
979 {
980 	struct temac_softc 	*sc = (struct temac_softc *)arg;
981 	uint32_t 		stat;
982 
983 	/* XXX: We may need to splnet() here if cdmac(4) changes. */
984 
985 	if ((stat = cdmac_rx_stat(sc)) & CDMAC_STAT_ERROR) {
986 		printf("%s: receive DMA is toast (%#08x), halted!\n",
987 		    sc->sc_dev.dv_xname, stat);
988 
989 		/* XXXFreza: how to signal this upstream? */
990 		temac_stop(&sc->sc_if, 1);
991 		sc->sc_dead = 1;
992 	}
993 
994 #if TEMAC_DEBUG > 0
995 	printf("%s: rx intr 0x%08x\n", device_xname(&sc->sc_dev), stat);
996 #endif
997 	temac_rxreap(sc);
998 }
999 
1000 /*
1001  * Utils.
1002  */
1003 static inline void
1004 temac_txcdsync(struct temac_softc *sc, int first, int cnt, int flag)
1005 {
1006 	if ((first + cnt) > TEMAC_NTXDESC) {
1007 		bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
1008 		    TEMAC_TXDOFF(first),
1009 		    sizeof(struct cdmac_descr) * (TEMAC_NTXDESC - first),
1010 		    flag);
1011 		cnt = (first + cnt) % TEMAC_NTXDESC;
1012 		first = 0;
1013 	}
1014 
1015 	bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
1016 	    TEMAC_TXDOFF(first),
1017 	    sizeof(struct cdmac_descr) * cnt,
1018 	    flag);
1019 }
1020 
1021 static inline void
1022 temac_rxcdsync(struct temac_softc *sc, int first, int cnt, int flag)
1023 {
1024 	if ((first + cnt) > TEMAC_NRXDESC) {
1025 		bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
1026 		    TEMAC_RXDOFF(first),
1027 		    sizeof(struct cdmac_descr) * (TEMAC_NRXDESC - first),
1028 		    flag);
1029 		cnt = (first + cnt) % TEMAC_NRXDESC;
1030 		first = 0;
1031 	}
1032 
1033 	bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
1034 	    TEMAC_RXDOFF(first),
1035 	    sizeof(struct cdmac_descr) * cnt,
1036 	    flag);
1037 }
1038 
/*
 * Reclaim completed transmit jobs: free mbufs, return descriptors and
 * software slots to the free pools, and restart the Tx channel if more
 * batches are pending.  Called only from the Tx interrupt handler.
 */
static void
temac_txreap(struct temac_softc *sc)
{
	struct temac_txsoft 	*txs;
	bus_dmamap_t 		dmap;
	int 			sent = 0;

	/*
	 * Transmit interrupts happen on the last descriptor of Tx jobs.
	 * Hence, every time we're called (and we assume txintr is our
	 * only caller!), we reap packets up to and including the one
	 * marked as last-in-batch.
	 *
	 * XXX we rely on that we make EXACTLY one batch per intr, no more
	 */
	while (sc->sc_txsfree != TEMAC_TXQLEN) {
		txs = &sc->sc_txsoft[sc->sc_txsreap];
		dmap = txs->txs_dmap;

		/* Return this packet's descriptors to the free pool. */
		sc->sc_txreap = TEMAC_TXINC(sc->sc_txreap, dmap->dm_nsegs);
		sc->sc_txfree += dmap->dm_nsegs;

		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		sc->sc_if.if_opackets++;
		sent = 1;

		/* Release the software slot. */
		sc->sc_txsreap = TEMAC_TXSNEXT(sc->sc_txsreap);
		sc->sc_txsfree++;

		/* End of batch: channel is idle, kick any pending batch. */
		if (txs->txs_last) {
			txs->txs_last = 0;
			sc->sc_txbusy = 0; 	/* channel stopped now */

			temac_txkick(sc);
			break;
		}
	}

	/* We freed resources -- let the stack queue more packets. */
	if (sent && (sc->sc_if.if_flags & IFF_OACTIVE))
		sc->sc_if.if_flags &= ~IFF_OACTIVE;
}
1083 
1084 static int
1085 temac_rxalloc(struct temac_softc *sc, int which, int verbose)
1086 {
1087 	struct temac_rxsoft 	*rxs;
1088 	struct mbuf 		*m;
1089 	uint32_t 		stat;
1090 	int 			error;
1091 
1092 	rxs = &sc->sc_rxsoft[which];
1093 
1094 	/* The mbuf itself is not our problem, just clear DMA related stuff. */
1095 	if (rxs->rxs_mbuf != NULL) {
1096 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmap);
1097 		rxs->rxs_mbuf = NULL;
1098 	}
1099 
1100 	/*
1101 	 * We would like to store mbuf and dmap in application specific
1102 	 * fields of the descriptor, but that doesn't work for Rx. Shame
1103 	 * on Xilinx for this (and for the useless timer architecture).
1104 	 *
1105 	 * Hence each descriptor needs its own soft state. We may want
1106 	 * to merge multiple rxs's into a monster mbuf when we support
1107 	 * jumbo frames though. Also, we use single set of indexing
1108 	 * variables for both sc_rxdescs[] and sc_rxsoft[].
1109 	 */
1110 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1111 	if (m == NULL) {
1112 		if (verbose)
1113 			printf("%s: out of Rx header mbufs\n",
1114 			    sc->sc_dev.dv_xname);
1115 		return (ENOBUFS);
1116 	}
1117 	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
1118 
1119 	MCLGET(m, M_DONTWAIT);
1120 	if ((m->m_flags & M_EXT) == 0) {
1121 		if (verbose)
1122 			printf("%s: out of Rx cluster mbufs\n",
1123 			    sc->sc_dev.dv_xname);
1124 		m_freem(m);
1125 		return (ENOBUFS);
1126 	}
1127 
1128 	rxs->rxs_mbuf = m;
1129 	m->m_pkthdr.len = m->m_len = MCLBYTES;
1130 
1131 	/* Make sure the payload after ethernet header is 4-aligned. */
1132 	m_adj(m, 2);
1133 
1134 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmap, m,
1135 	    BUS_DMA_NOWAIT);
1136 	if (error) {
1137 		if (verbose)
1138 			printf("%s: could not map Rx descriptor %d, "
1139 			    "error = %d\n", sc->sc_dev.dv_xname, which, error);
1140 
1141 		rxs->rxs_mbuf = NULL;
1142 		m_freem(m);
1143 
1144 		return (error);
1145 	}
1146 
1147 	stat = \
1148 	    (TEMAC_ISINTR(which) ? CDMAC_STAT_INTR : 0) |
1149 	    (TEMAC_ISLAST(which) ? CDMAC_STAT_STOP : 0);
1150 
1151 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmap, 0,
1152 	    rxs->rxs_dmap->dm_mapsize, BUS_DMASYNC_PREREAD);
1153 
1154 	/* Descriptor post-sync, if needed, left to the caller. */
1155 
1156 	sc->sc_rxdescs[which].desc_addr = rxs->rxs_dmap->dm_segs[0].ds_addr;
1157 	sc->sc_rxdescs[which].desc_size  = rxs->rxs_dmap->dm_segs[0].ds_len;
1158 	sc->sc_rxdescs[which].desc_stat = stat;
1159 
1160 	/* Descriptor pre-sync, if needed, left to the caller. */
1161 
1162 	return (0);
1163 }
1164 
/*
 * Reap received frames from the Rx ring: hand good packets up the
 * stack, recycle the descriptors of bad ones in place, and restart
 * the Rx DMA channel if it stopped at the end of the ring.  Called
 * from the Rx interrupt handler and from the temac_rxtimo() callout.
 */
static void
temac_rxreap(struct temac_softc *sc)
{
	struct ifnet 		*ifp = &sc->sc_if;
	uint32_t 		stat, rxstat, rxsize;
	struct mbuf 		*m;
	int 			nseg, head, tail;

	head = sc->sc_rxreap;
	tail = 0; 		/* gcc: quiet "uninitialized"; set whenever nseg > 0 */
	nseg = 0;

	/*
	 * Collect finished entries on the Rx list, kick DMA if we hit
	 * the end. DMA will always stop on the last descriptor in chain,
	 * so it will never hit a reap-in-progress descriptor.
	 */
	while (1) {
		/* Maybe we previously failed to refresh this one? */
		if (sc->sc_rxsoft[sc->sc_rxreap].rxs_mbuf == NULL) {
			if (temac_rxalloc(sc, sc->sc_rxreap, 0) != 0)
				break;

			sc->sc_rxreap = TEMAC_RXNEXT(sc->sc_rxreap);
			continue;
		}
		/* Pull the descriptor out of DMA so we can read its status. */
		temac_rxcdsync(sc, sc->sc_rxreap, 1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		stat = sc->sc_rxdescs[sc->sc_rxreap].desc_stat;
		m = NULL;

		/* Not filled by the engine yet -- we've caught up. */
		if ((stat & CDMAC_STAT_DONE) == 0)
			break;

		/* Count any descriptor we've collected, regardless of status. */
		nseg ++;

		/* XXXFreza: This won't work for jumbo frames. */

		if ((stat & (CDMAC_STAT_EOP | CDMAC_STAT_SOP)) !=
		    (CDMAC_STAT_EOP | CDMAC_STAT_SOP)) {
		    	printf("%s: Rx packet doesn't fit in "
		    	    "one descriptor, stat = %#08x\n",
		    	    sc->sc_dev.dv_xname, stat);
			goto badframe;
		}

		/* Dissect TEMAC footer if this is end of packet. */
		rxstat = sc->sc_rxdescs[sc->sc_rxreap].desc_rxstat;
		rxsize = sc->sc_rxdescs[sc->sc_rxreap].desc_rxsize &
		    RXSIZE_MASK;

		if ((rxstat & RXSTAT_GOOD) == 0 ||
		    (rxstat & RXSTAT_SICK) != 0) {
		    	printf("%s: corrupt Rx packet, rxstat = %#08x\n",
		    	    sc->sc_dev.dv_xname, rxstat);
			goto badframe;
		}

		/* We are now bound to succeed. */
		bus_dmamap_sync(sc->sc_dmat,
		    sc->sc_rxsoft[sc->sc_rxreap].rxs_dmap, 0,
		    sc->sc_rxsoft[sc->sc_rxreap].rxs_dmap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		m = sc->sc_rxsoft[sc->sc_rxreap].rxs_mbuf;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = rxsize;

 badframe:
 		/* Get ready for more work; remember the slot we just consumed. */
		tail = sc->sc_rxreap;
		sc->sc_rxreap = TEMAC_RXNEXT(sc->sc_rxreap);

 		/* On failures we reuse the descriptor and go ahead. */
 		if (m == NULL) {
			sc->sc_rxdescs[tail].desc_stat =
			    (TEMAC_ISINTR(tail) ? CDMAC_STAT_INTR : 0) |
			    (TEMAC_ISLAST(tail) ? CDMAC_STAT_STOP : 0);

			ifp->if_ierrors++;
			continue;
 		}

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m);
#endif

		ifp->if_ipackets++;
		(ifp->if_input)(ifp, m);

		/* Refresh descriptor, bail out if we're out of buffers. */
		if (temac_rxalloc(sc, tail, 1) != 0) {
 			sc->sc_rxreap = TEMAC_RXINC(sc->sc_rxreap, -1);
 			printf("%s: Rx give up for now\n", sc->sc_dev.dv_xname);
			break;
		}
	}

	/* We may now have a contiguous ready-to-go chunk of descriptors. */
	if (nseg > 0) {
#if TEMAC_RXDEBUG > 0
		printf("%s: rxreap: rxreap %03d -> %03d, nseg %03d\n",
		    sc->sc_dev.dv_xname, head, sc->sc_rxreap, nseg);
#endif
		/* Hand the refreshed descriptors back to the engine. */
		temac_rxcdsync(sc, head, nseg,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Engine stops on the CDMAC_STAT_STOP (last) descriptor;
		 * restart it from the top of the ring if we consumed it. */
		if (TEMAC_ISLAST(tail))
			cdmac_rx_start(sc, sc->sc_cdaddr + TEMAC_RXDOFF(0));
	}

	/* Ensure maximum Rx latency is kept under control. */
	callout_schedule(&sc->sc_rx_timo, hz / TEMAC_RXTIMO_HZ);
}
1282 
/*
 * Callout handler: reap the Rx ring periodically so receive never
 * stalls waiting for an interrupt that might not come.
 */
static void
temac_rxtimo(void *arg)
{
	struct temac_softc 	*sc = (struct temac_softc *)arg;
	int 			spl;

	/* We run TEMAC_RXTIMO_HZ times/sec to ensure Rx doesn't stall. */
	spl = splnet();
	temac_rxreap(sc);
	splx(spl);
}
1294 
1295 static void
1296 temac_reset(struct temac_softc *sc)
1297 {
1298 	uint32_t 		rcr, tcr;
1299 
1300 	/* Kill CDMAC channels. */
1301 	cdmac_tx_reset(sc);
1302 	cdmac_rx_reset(sc);
1303 
1304 	/* Disable receiver. */
1305 	rcr = gmi_read_4(TEMAC_GMI_RXCF1) & ~GMI_RX_ENABLE;
1306 	gmi_write_4(TEMAC_GMI_RXCF1, rcr);
1307 
1308 	/* Disable transmitter. */
1309 	tcr = gmi_read_4(TEMAC_GMI_TXCF) & ~GMI_TX_ENABLE;
1310 	gmi_write_4(TEMAC_GMI_TXCF, tcr);
1311 }
1312 
1313 static void
1314 temac_rxdrain(struct temac_softc *sc)
1315 {
1316 	struct temac_rxsoft 	*rxs;
1317 	int 			i;
1318 
1319 	for (i = 0; i < TEMAC_NRXDESC; i++) {
1320 		rxs = &sc->sc_rxsoft[i];
1321 
1322 		if (rxs->rxs_mbuf != NULL) {
1323 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmap);
1324 			m_freem(rxs->rxs_mbuf);
1325 			rxs->rxs_mbuf = NULL;
1326 		}
1327 	}
1328 
1329 	sc->sc_rx_drained = 1;
1330 }
1331 
1332 static void
1333 temac_txkick(struct temac_softc *sc)
1334 {
1335 	if (sc->sc_txsoft[sc->sc_txsreap].txs_mbuf != NULL &&
1336 	    sc->sc_txbusy == 0) {
1337 		cdmac_tx_start(sc, sc->sc_cdaddr + TEMAC_TXDOFF(sc->sc_txreap));
1338 		sc->sc_txbusy = 1;
1339 	}
1340 }
1341