/*	$NetBSD: if_xe.c,v 1.17 2005/12/11 12:18:25 christos Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_xe.c,v 1.17 2005/12/11 12:18:25 christos Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/bus.h>

#include <next68k/next68k/isr.h>

#include <next68k/dev/mb8795reg.h>
#include <next68k/dev/mb8795var.h>

#include <next68k/dev/bmapreg.h>
#include <next68k/dev/intiovar.h>
#include <next68k/dev/nextdmareg.h>
#include <next68k/dev/nextdmavar.h>

#include <next68k/dev/if_xevar.h>
#include <next68k/dev/if_xereg.h>

#ifdef DEBUG
#define XE_DEBUG
#endif

#ifdef XE_DEBUG
int xe_debug = 0;
#define DPRINTF(x) do { if (xe_debug) printf x; } while (0)
extern char *ndtracep;
extern char ndtrace[];
extern int ndtraceshow;
#define NDTRACEIF(x) do { if (ndtracep < (ndtrace + 8192)) { x; } } while (0)
#else
#define DPRINTF(x)
#define NDTRACEIF(x)
#endif
#define PRINTF(x) printf x;

extern int turbo;

int	xe_match(struct device *, struct cfdata *, void *);
void	xe_attach(struct device *, struct device *, void *);
int	xe_tint(void *);
int	xe_rint(void *);

struct mbuf *xe_dma_rxmap_load(struct mb8795_softc *, bus_dmamap_t);

bus_dmamap_t xe_dma_rx_continue(void *);
void xe_dma_rx_completed(bus_dmamap_t, void *);
bus_dmamap_t xe_dma_tx_continue(void *);
void xe_dma_tx_completed(bus_dmamap_t, void *);
void xe_dma_rx_shutdown(void *);
void xe_dma_tx_shutdown(void *);

static void	findchannel_defer(struct device *);

CFATTACH_DECL(xe, sizeof(struct xe_softc),
    xe_match, xe_attach, NULL, NULL);

static int xe_dma_medias[] = {
	IFM_ETHER|IFM_AUTO,
	IFM_ETHER|IFM_10_T,
	IFM_ETHER|IFM_10_2,
};
static int nxe_dma_medias = (sizeof(xe_dma_medias)/sizeof(xe_dma_medias[0]));

static int attached = 0;

/*
 * Functions and the switch for the MI code.
 */
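/*
 * The MI mb8795 code drives the hardware only through this switch,
 * reached via sc->sc_glue (cf. the MB_READ_REG/MB_WRITE_REG macros
 * used below).  For illustration only, the dispatch in mb8795var.h is
 * presumably of the form
 *
 *	#define MB_READ_REG(sc, reg) \
 *		((*(sc)->sc_glue->gl_read_reg)((sc), (reg)))
 *
 * where the member name gl_read_reg is an assumption here.
 */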
u_char		xe_read_reg(struct mb8795_softc *, int);
void		xe_write_reg(struct mb8795_softc *, int, u_char);
void		xe_dma_reset(struct mb8795_softc *);
void		xe_dma_rx_setup(struct mb8795_softc *);
void		xe_dma_rx_go(struct mb8795_softc *);
struct mbuf *	xe_dma_rx_mbuf(struct mb8795_softc *);
void		xe_dma_tx_setup(struct mb8795_softc *);
void		xe_dma_tx_go(struct mb8795_softc *);
int		xe_dma_tx_mbuf(struct mb8795_softc *, struct mbuf *);
int		xe_dma_tx_isactive(struct mb8795_softc *);

struct mb8795_glue xe_glue = {
	xe_read_reg,
	xe_write_reg,
	xe_dma_reset,
	xe_dma_rx_setup,
	xe_dma_rx_go,
	xe_dma_rx_mbuf,
	xe_dma_tx_setup,
	xe_dma_tx_go,
	xe_dma_tx_mbuf,
	xe_dma_tx_isactive,
};

int
xe_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached)
		return (0);

	ia->ia_addr = (void *)NEXT_P_ENET;

	return (1);
}

static void
findchannel_defer(struct device *self)
{
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;
	int i, error;

	if (!xsc->sc_txdma) {
		xsc->sc_txdma = nextdma_findchannel("enetx");
		if (xsc->sc_txdma == NULL)
			panic("%s: can't find enetx DMA channel",
			      sc->sc_dev.dv_xname);
	}
	if (!xsc->sc_rxdma) {
		xsc->sc_rxdma = nextdma_findchannel("enetr");
		if (xsc->sc_rxdma == NULL)
			panic("%s: can't find enetr DMA channel",
			      sc->sc_dev.dv_xname);
	}
	printf("%s: using DMA channels %s %s\n", sc->sc_dev.dv_xname,
		xsc->sc_txdma->sc_dev.dv_xname, xsc->sc_rxdma->sc_dev.dv_xname);

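	/*
	 * Callback contract with the nextdma engine, as used by this
	 * driver: continue_cb hands the engine the next loaded dmamap
	 * when it is ready for more work, completed_cb fires once per
	 * map when its transfer has finished, shutdown_cb runs when the
	 * channel drains or is stopped, and cb_arg is handed back to
	 * every callback.
	 */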
	nextdma_setconf(xsc->sc_rxdma, continue_cb, xe_dma_rx_continue);
	nextdma_setconf(xsc->sc_rxdma, completed_cb, xe_dma_rx_completed);
	nextdma_setconf(xsc->sc_rxdma, shutdown_cb, xe_dma_rx_shutdown);
	nextdma_setconf(xsc->sc_rxdma, cb_arg, sc);

	nextdma_setconf(xsc->sc_txdma, continue_cb, xe_dma_tx_continue);
	nextdma_setconf(xsc->sc_txdma, completed_cb, xe_dma_tx_completed);
	nextdma_setconf(xsc->sc_txdma, shutdown_cb, xe_dma_tx_shutdown);
	nextdma_setconf(xsc->sc_txdma, cb_arg, sc);

	/* Initialize the DMA maps. */
	error = bus_dmamap_create(xsc->sc_txdma->sc_dmat, MCLBYTES,
				  (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
				  &xsc->sc_tx_dmamap);
	if (error) {
		panic("%s: can't create tx DMA map, error = %d",
		      sc->sc_dev.dv_xname, error);
	}

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		error = bus_dmamap_create(xsc->sc_rxdma->sc_dmat, MCLBYTES,
					  (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
					  &xsc->sc_rx_dmamap[i]);
		if (error) {
			panic("%s: can't create rx DMA map, error = %d",
			      sc->sc_dev.dv_xname, error);
		}
		xsc->sc_rx_mb_head[i] = NULL;
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;
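	/*
	 * The three indices implement a ring over the MB8795_NRXBUFS
	 * receive maps: sc_rx_loaded_idx is the last map handed to the
	 * DMA engine, sc_rx_completed_idx the last map the engine has
	 * filled, and sc_rx_handled_idx the last map passed up the
	 * stack; the ring is empty when handled == completed (see
	 * xe_dma_rx_mbuf() and xe_dma_rx_continue()).
	 */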

	/* @@@ More NeXT hacks.
	 * The 2000 bytes cover at least a 1500-byte MTU plus headers,
	 * plus DMA_BEGINALIGNMENT and DMA_ENDALIGNMENT slack.
	 */
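	/*
	 * For illustration (the alignment values are assumptions; the
	 * real ones are in nextdmareg.h): a 1500-byte payload plus a
	 * 14-byte Ethernet header is 1514 bytes, and even with a few
	 * bytes of begin-alignment and 16 bytes of end-alignment slack
	 * the total stays comfortably under 2000.
	 */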
	xsc->sc_txbuf = malloc(2000, M_DEVBUF, M_NOWAIT);
	if (!xsc->sc_txbuf)
		panic("%s: can't malloc tx DMA buffer", sc->sc_dev.dv_xname);

	xsc->sc_tx_mb_head = NULL;
	xsc->sc_tx_loaded = 0;

	mb8795_config(sc, xe_dma_medias, nxe_dma_medias, xe_dma_medias[0]);

	isrlink_autovec(xe_tint, sc, NEXT_I_IPL(NEXT_I_ENETX), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETX);
	isrlink_autovec(xe_rint, sc, NEXT_I_IPL(NEXT_I_ENETR), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETR);
}

void
xe_attach(struct device *parent, struct device *self, void *aux)
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;

	DPRINTF(("%s: xe_attach()\n", sc->sc_dev.dv_xname));

	{
		/* kludge from machdep.c:next68k_bootargs() */
		extern u_char rom_enetaddr[6];
		int i;

		for (i = 0; i < 6; i++)
			sc->sc_enaddr[i] = rom_enetaddr[i];
	}

	printf("\n%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	       sc->sc_dev.dv_xname,
	       sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2],
	       sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5]);

	xsc->sc_bst = ia->ia_bst;
	if (bus_space_map(xsc->sc_bst, NEXT_P_ENET,
			  XE_DEVICE_SIZE, 0, &xsc->sc_bsh)) {
		panic("\n%s: can't map mb8795 registers",
		      sc->sc_dev.dv_xname);
	}

	sc->sc_bmap_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bmap_bst, NEXT_P_BMAP,
			  BMAP_SIZE, 0, &sc->sc_bmap_bsh)) {
		panic("\n%s: can't map bmap registers",
		      sc->sc_dev.dv_xname);
	}

	/*
	 * Set up glue for MI code.
	 */
	sc->sc_glue = &xe_glue;

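	/*
	 * The DMA channels may not have attached yet.  If either lookup
	 * fails here, defer the rest of the attachment and retry from
	 * findchannel_defer() once interrupt configuration has run.
	 */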
	xsc->sc_txdma = nextdma_findchannel("enetx");
	xsc->sc_rxdma = nextdma_findchannel("enetr");
	if (xsc->sc_rxdma && xsc->sc_txdma) {
		findchannel_defer(self);
	} else {
		config_defer(self, findchannel_defer);
	}

	attached = 1;
}

int
xe_tint(void *arg)
{
	if (!INTR_OCCURRED(NEXT_I_ENETX))
		return (0);
	mb8795_tint((struct mb8795_softc *)arg);
	return (1);
}

int
xe_rint(void *arg)
{
	if (!INTR_OCCURRED(NEXT_I_ENETR))
		return (0);
	mb8795_rint((struct mb8795_softc *)arg);
	return (1);
}

/*
 * Glue functions.
 */

u_char
xe_read_reg(struct mb8795_softc *sc, int reg)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return (bus_space_read_1(xsc->sc_bst, xsc->sc_bsh, reg));
}

void
xe_write_reg(struct mb8795_softc *sc, int reg, u_char val)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	bus_space_write_1(xsc->sc_bst, xsc->sc_bsh, reg, val);
}

void
xe_dma_reset(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA reset\n"));

	nextdma_reset(xsc->sc_rxdma);
	nextdma_reset(xsc->sc_txdma);

	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
				0, xsc->sc_tx_dmamap->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		xsc->sc_tx_loaded = 0;
	}
	if (xsc->sc_tx_mb_head) {
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
	}

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		if (xsc->sc_rx_mb_head[i]) {
			bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, xsc->sc_rx_dmamap[i]);
			m_freem(xsc->sc_rx_mb_head[i]);
			xsc->sc_rx_mb_head[i] = NULL;
		}
	}
}

void
xe_dma_rx_setup(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA rx setup\n"));

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		xsc->sc_rx_mb_head[i] =
			xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	nextdma_init(xsc->sc_rxdma);
}

void
xe_dma_rx_go(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA rx go\n"));

	nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
}

struct mbuf *
xe_dma_rx_mbuf(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;
	struct mbuf *m;

	m = NULL;
	if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
		xsc->sc_rx_handled_idx++;
		xsc->sc_rx_handled_idx %= MB8795_NRXBUFS;

		map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
		m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];

		m->m_len = map->dm_xfer_len;

		bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
				0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);

		/* Install a fresh mbuf for the next packet. */

		xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
			xe_dma_rxmap_load(sc, map);

		/* Punt runt packets; DMA restarts create zero-length
		 * packets, for example.
		 */
		if (m->m_len < ETHER_MIN_LEN) {
			m_freem(m);
			m = NULL;
		}
	}
	return (m);
}

void
xe_dma_tx_setup(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA tx setup\n"));

	nextdma_init(xsc->sc_txdma);
}

void
xe_dma_tx_go(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA tx go\n"));

	nextdma_start(xsc->sc_txdma, DMACSR_SETWRITE);
}

int
xe_dma_tx_mbuf(struct mb8795_softc *sc, struct mbuf *m)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int error;

	xsc->sc_tx_mb_head = m;

/* The following is a NeXT-specific hack that should probably be moved
 * out of MI code.  This macro assumes it can move the start pointer
 * forward as needed in the buffer.  Perhaps it should zero the extra
 * bytes of the buffer.
 */
#define REALIGN_DMABUF(s,l) \
	{ (s) = ((u_char *)(((unsigned)(s)+DMA_BEGINALIGNMENT-1) \
			&~(DMA_BEGINALIGNMENT-1))); \
	  (l) = ((u_char *)(((unsigned)((s)+(l))+DMA_ENDALIGNMENT-1) \
			&~(DMA_ENDALIGNMENT-1)))-(s); }
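/*
 * For illustration (the alignment values are assumptions; the real
 * ones are in nextdmareg.h): with DMA_BEGINALIGNMENT 4 and
 * DMA_ENDALIGNMENT 16, a buffer with s = 0x1002 and l = 21 becomes
 * s = 0x1004 and l = 28, so that both s and s + l = 0x1020 are
 * suitably aligned for the DMA engine.
 */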

#if 0
	error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
				     xsc->sc_tx_dmamap, xsc->sc_tx_mb_head, BUS_DMA_NOWAIT);
#else
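	/*
	 * Loading the mbuf chain directly (above, disabled) would avoid
	 * the copy, but the NeXT DMA engine apparently wants one
	 * contiguous, suitably aligned buffer, so coalesce the chain
	 * into sc_txbuf and pad runts by hand.
	 */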
	{
		u_char *buf = xsc->sc_txbuf;
		int buflen = m->m_pkthdr.len;

		{
			u_char *p = buf;
			for (m = xsc->sc_tx_mb_head; m; m = m->m_next) {
				if (m->m_len == 0)
					continue;
				bcopy(mtod(m, u_char *), p, m->m_len);
				p += m->m_len;
			}
			/* Pad runt packets. */
			if (buflen < ETHER_MIN_LEN - ETHER_CRC_LEN) {
				memset(p, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN - buflen);
				buflen = ETHER_MIN_LEN - ETHER_CRC_LEN;
			}
		}

		error = bus_dmamap_load(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
					buf, buflen, NULL, BUS_DMA_NOWAIT);
	}
#endif
	if (error) {
		printf("%s: can't load mbuf chain, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
		return (error);
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: xsc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
		      xsc->sc_tx_loaded);
	}
#endif

	bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
			xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

int
xe_dma_tx_isactive(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return (xsc->sc_tx_loaded != 0);
}

/****************************************************************/

void
xe_dma_tx_completed(bus_dmamap_t map, void *arg)
{
#if defined(XE_DEBUG) || defined(DIAGNOSTIC)
	struct mb8795_softc *sc = arg;
#endif
#ifdef DIAGNOSTIC
	struct xe_softc *xsc = (struct xe_softc *)sc;
#endif

	DPRINTF(("%s: xe_dma_tx_completed()\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx completed never loaded", sc->sc_dev.dv_xname);
	}
	if (map != xsc->sc_tx_dmamap) {
		panic("%s: unexpected tx completed map", sc->sc_dev.dv_xname);
	}
#endif
}

void
xe_dma_tx_shutdown(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	DPRINTF(("%s: xe_dma_tx_shutdown()\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx shutdown never loaded", sc->sc_dev.dv_xname);
	}
#endif

	if (turbo)
		MB_WRITE_REG(sc, MB8795_TXMODE, MB8795_TXMODE_TURBO1);
	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
				0, xsc->sc_tx_dmamap->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;

		xsc->sc_tx_loaded--;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: xsc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
		      xsc->sc_tx_loaded);
	}
#endif

	ifp->if_timer = 0;

#if 1
	if ((ifp->if_flags & IFF_RUNNING) && !IF_IS_EMPTY(&sc->sc_tx_snd)) {
		void mb8795_start_dma(struct mb8795_softc *); /* XXXX */
		mb8795_start_dma(sc);
	}
#endif

#if 0
	/* Enable ready interrupt */
	MB_WRITE_REG(sc, MB8795_TXMASK,
		     MB_READ_REG(sc, MB8795_TXMASK)
		     | MB8795_TXMASK_TXRXIE/* READYIE */);
#endif
}

void
xe_dma_rx_completed(bus_dmamap_t map, void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		xsc->sc_rx_completed_idx++;
		xsc->sc_rx_completed_idx %= MB8795_NRXBUFS;

		DPRINTF(("%s: xe_dma_rx_completed(), xsc->sc_rx_completed_idx = %d\n",
			 sc->sc_dev.dv_xname, xsc->sc_rx_completed_idx));

#ifdef DIAGNOSTIC
		if (map != xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx]) {
			panic("%s: Unexpected rx dmamap completed",
			      sc->sc_dev.dv_xname);
		}
#endif
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx dmamap completed while if not running\n",
			 sc->sc_dev.dv_xname));
#endif
}

void
xe_dma_rx_shutdown(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		DPRINTF(("%s: xe_dma_rx_shutdown(), restarting.\n",
			 sc->sc_dev.dv_xname));

		nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
		if (turbo)
			MB_WRITE_REG(sc, MB8795_RXMODE,
			    MB8795_RXMODE_TEST | MB8795_RXMODE_MULTICAST);
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx DMA shutdown while if not running\n",
			 sc->sc_dev.dv_xname));
#endif
}

/*
 * Load a dmamap with a freshly allocated mbuf.
 */
struct mbuf *
xe_dma_rxmap_load(struct mb8795_softc *sc, bus_dmamap_t map)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			m = NULL;
		} else {
			m->m_len = MCLBYTES;
		}
	}
	if (!m) {
		/* @@@ Handle this gracefully by reusing a scratch buffer
		 * or something.
		 */
		panic("Unable to get memory for incoming ethernet");
	}

	/* Align the buffer; @@@ NeXT-specific.
	 * Perhaps M_ALIGN should be used here instead?
	 * First trim the length so there is room to align within the
	 * cluster.
	 */
	{
		u_char *buf = m->m_data;
		int buflen = m->m_len;

		buflen -= DMA_ENDALIGNMENT + DMA_BEGINALIGNMENT;
		REALIGN_DMABUF(buf, buflen);
		m->m_data = buf;
		m->m_len = buflen;
	}

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len;

	error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat,
			map, m, BUS_DMA_NOWAIT);
	if (error) {
		DPRINTF(("DEBUG: m->m_data = %p, m->m_len = %d\n",
				m->m_data, m->m_len));
		DPRINTF(("DEBUG: MCLBYTES = %d, map->_dm_size = %ld\n",
				MCLBYTES, map->_dm_size));

		panic("%s: can't load rx mbuf chain, error = %d",
				sc->sc_dev.dv_xname, error);
		/* NOTREACHED */
		m_freem(m);
		m = NULL;
	}

	/* Sync only after a successful load. */
	bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0,
			map->dm_mapsize, BUS_DMASYNC_PREREAD);

	return (m);
}

bus_dmamap_t
xe_dma_rx_continue(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_dmamap_t map = NULL;

	if (ifp->if_flags & IFF_RUNNING) {
		if (((xsc->sc_rx_loaded_idx + 1) % MB8795_NRXBUFS) ==
		    xsc->sc_rx_handled_idx) {
			/* Make space for one packet by dropping one. */
			struct mbuf *m;

			m = xe_dma_rx_mbuf(sc);
			if (m)
				m_freem(m);
#ifdef DIAGNOSTIC
			DPRINTF(("%s: out of receive DMA buffers\n",
				 sc->sc_dev.dv_xname));
#endif
		}
		xsc->sc_rx_loaded_idx++;
		xsc->sc_rx_loaded_idx %= MB8795_NRXBUFS;
		map = xsc->sc_rx_dmamap[xsc->sc_rx_loaded_idx];

		DPRINTF(("%s: xe_dma_rx_continue() xsc->sc_rx_loaded_idx = %d\n",
			 sc->sc_dev.dv_xname, xsc->sc_rx_loaded_idx));
	}
#ifdef DIAGNOSTIC
	else
		panic("%s: Unexpected rx DMA continue while if not running",
		      sc->sc_dev.dv_xname);
#endif

	return (map);
}

bus_dmamap_t
xe_dma_tx_continue(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;

	DPRINTF(("%s: xe_dma_tx_continue()\n", sc->sc_dev.dv_xname));

	if (xsc->sc_tx_loaded) {
		map = NULL;
	} else {
		map = xsc->sc_tx_dmamap;
		xsc->sc_tx_loaded++;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 1) {
		panic("%s: xsc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
				xsc->sc_tx_loaded);
	}
#endif

	return (map);
}