xref: /netbsd-src/sys/arch/next68k/dev/if_xe.c (revision 08c81a9c2dc8c7300e893321eb65c0925d60871c)
1 /*	$NetBSD: if_xe.c,v 1.7 2002/09/11 01:46:31 mycroft Exp $	*/
2 /*
3  * Copyright (c) 1998 Darrin B. Jewell
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by Darrin B. Jewell
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include "opt_inet.h"
33 #include "bpfilter.h"
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/mbuf.h>
38 #include <sys/syslog.h>
39 #include <sys/socket.h>
40 #include <sys/device.h>
41 
42 #include <net/if.h>
43 #include <net/if_ether.h>
44 #include <net/if_media.h>
45 
46 #ifdef INET
47 #include <netinet/in.h>
48 #include <netinet/if_inarp.h>
49 #endif
50 
51 #include <machine/autoconf.h>
52 #include <machine/cpu.h>
53 #include <machine/intr.h>
54 #include <machine/bus.h>
55 
56 #include <next68k/next68k/isr.h>
57 
58 #include <next68k/dev/mb8795reg.h>
59 #include <next68k/dev/mb8795var.h>
60 
61 #include <next68k/dev/bmapreg.h>
62 #include <next68k/dev/intiovar.h>
63 #include <next68k/dev/nextdmareg.h>
64 #include <next68k/dev/nextdmavar.h>
65 
66 #include <next68k/dev/if_xevar.h>
67 #include <next68k/dev/if_xereg.h>
68 
69 #ifdef DEBUG
70 #define XE_DEBUG
71 #endif
72 
73 #ifdef XE_DEBUG
74 int xe_debug = 0;
75 #define DPRINTF(x) do { if (xe_debug) printf x; } while (0)
76 extern char *ndtracep;
77 extern char ndtrace[];
78 extern int ndtraceshow;
79 #define NDTRACEIF(x) if (10 && ndtracep < (ndtrace + 8192)) do {x;} while (0)
80 #else
81 #define DPRINTF(x)
82 #define NDTRACEIF(x)
83 #endif
84 #define PRINTF(x) printf x;
85 
86 extern int turbo;
87 
88 int	xe_match __P((struct device *, struct cfdata *, void *));
89 void	xe_attach __P((struct device *, struct device *, void *));
90 int	xe_tint __P((void *));
91 int	xe_rint __P((void *));
92 
93 struct mbuf * xe_dma_rxmap_load __P((struct mb8795_softc *,
94 		bus_dmamap_t map));
95 
96 bus_dmamap_t xe_dma_rx_continue __P((void *));
97 void xe_dma_rx_completed __P((bus_dmamap_t,void *));
98 bus_dmamap_t xe_dma_tx_continue __P((void *));
99 void xe_dma_tx_completed __P((bus_dmamap_t,void *));
100 void xe_dma_rx_shutdown __P((void *));
101 void xe_dma_tx_shutdown __P((void *));
102 
103 static void	findchannel_defer __P((struct device *));
104 
105 struct cfattach xe_ca = {
106 	sizeof(struct xe_softc), xe_match, xe_attach
107 };
108 
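/*
 * Media types advertised through ifmedia: autoselect, 10BASE-T,
 * and 10BASE2 (thin coax).
 */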
109 static int xe_dma_medias[] = {
110 	IFM_ETHER|IFM_AUTO,
111 	IFM_ETHER|IFM_10_T,
112 	IFM_ETHER|IFM_10_2,
113 };
114 static int nxe_dma_medias = (sizeof(xe_dma_medias)/sizeof(xe_dma_medias[0]));
115 
116 static int attached = 0;
117 
118 /*
119  * Functions and the switch for the MI code.
120  */
121 u_char		xe_read_reg __P((struct mb8795_softc *, int));
122 void		xe_write_reg __P((struct mb8795_softc *, int, u_char));
123 void		xe_dma_reset __P((struct mb8795_softc *));
124 void		xe_dma_rx_setup __P((struct mb8795_softc *));
125 void		xe_dma_rx_go __P((struct mb8795_softc *));
126 struct mbuf *	xe_dma_rx_mbuf __P((struct mb8795_softc *));
127 void		xe_dma_tx_setup __P((struct mb8795_softc *));
128 void		xe_dma_tx_go __P((struct mb8795_softc *));
129 int		xe_dma_tx_mbuf __P((struct mb8795_softc *, struct mbuf *));
130 int		xe_dma_tx_isactive __P((struct mb8795_softc *));
131 #if 0
132 int	xe_dma_setup __P((struct mb8795_softc *, caddr_t *,
133 	    size_t *, int, size_t *));
134 void	xe_dma_go __P((struct mb8795_softc *));
135 void	xe_dma_stop __P((struct mb8795_softc *));
136 int	xe_dma_isactive __P((struct mb8795_softc *));
137 #endif
138 
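/*
 * Glue switch handed to the machine-independent mb8795 driver; the MI
 * code reaches the chip registers and the NeXT DMA engine through these
 * hooks (installed via sc->sc_glue in xe_attach()).
 */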
139 struct mb8795_glue xe_glue = {
140 	xe_read_reg,
141 	xe_write_reg,
142 	xe_dma_reset,
143 	xe_dma_rx_setup,
144 	xe_dma_rx_go,
145 	xe_dma_rx_mbuf,
146 	xe_dma_tx_setup,
147 	xe_dma_tx_go,
148 	xe_dma_tx_mbuf,
149 	xe_dma_tx_isactive,
150 #if 0
151 	xe_dma_setup,
152 	xe_dma_go,
153 	xe_dma_stop,
154 	xe_dma_isactive,
155 	NULL,			/* gl_clear_latched_intr */
156 #endif
157 };
158 
159 int
160 xe_match(parent, match, aux)
161 	struct device *parent;
162 	struct cfdata *match;
163 	void *aux;
164 {
165 	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
166 
167 	if (attached)
168 		return (0);
169 
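	/*
	 * There is only one on-board Ethernet controller; point the probe
	 * at its fixed register address.
	 */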
170 	ia->ia_addr = (void *)NEXT_P_ENET;
171 
172 	return (1);
173 }
174 
175 static void
176 findchannel_defer(self)
177 	struct device *self;
178 {
179 	struct xe_softc *xsc = (struct xe_softc *)self;
180 	struct mb8795_softc *sc = &xsc->sc_mb8795;
181 	int i, error;
182 
183 	if (!xsc->sc_txdma) {
184 		xsc->sc_txdma = nextdma_findchannel ("enetx");
185 		if (xsc->sc_txdma == NULL)
186 			panic ("%s: can't find enetx dma channel",
187 			       sc->sc_dev.dv_xname);
188 	}
189 	if (!xsc->sc_rxdma) {
190 		xsc->sc_rxdma = nextdma_findchannel ("enetr");
191 		if (xsc->sc_rxdma == NULL)
192 			panic ("%s: can't find enetr dma channel",
193 			       sc->sc_dev.dv_xname);
194 	}
195 	printf ("%s: using dma channels %s %s\n", sc->sc_dev.dv_xname,
196 		xsc->sc_txdma->sc_dev.dv_xname, xsc->sc_rxdma->sc_dev.dv_xname);
197 
198 	nextdma_setconf (xsc->sc_rxdma, continue_cb, xe_dma_rx_continue);
199 	nextdma_setconf (xsc->sc_rxdma, completed_cb, xe_dma_rx_completed);
200 	nextdma_setconf (xsc->sc_rxdma, shutdown_cb, xe_dma_rx_shutdown);
201 	nextdma_setconf (xsc->sc_rxdma, cb_arg, sc);
202 
203 	nextdma_setconf (xsc->sc_txdma, continue_cb, xe_dma_tx_continue);
204 	nextdma_setconf (xsc->sc_txdma, completed_cb, xe_dma_tx_completed);
205 	nextdma_setconf (xsc->sc_txdma, shutdown_cb, xe_dma_tx_shutdown);
206 	nextdma_setconf (xsc->sc_txdma, cb_arg, sc);
207 
208 	/* Initialize the dma maps */
209 	error = bus_dmamap_create(xsc->sc_txdma->sc_dmat, MCLBYTES,
210 				  (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
211 				  &xsc->sc_tx_dmamap);
212 	if (error) {
213 		panic("%s: can't create tx DMA map, error = %d\n",
214 		      sc->sc_dev.dv_xname, error);
215 	}
216 
217 	for(i = 0; i < MB8795_NRXBUFS; i++) {
218 		error = bus_dmamap_create(xsc->sc_rxdma->sc_dmat, MCLBYTES,
219 					  (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
220 					  &xsc->sc_rx_dmamap[i]);
221 		if (error) {
222 			panic("%s: can't create rx DMA map, error = %d\n",
223 			      sc->sc_dev.dv_xname, error);
224 		}
225 		xsc->sc_rx_mb_head[i] = NULL;
226 	}
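	/*
	 * Receive ring bookkeeping: sc_rx_loaded_idx is the map most recently
	 * handed to the DMA engine, sc_rx_completed_idx the map the DMA
	 * engine most recently finished, and sc_rx_handled_idx the map most
	 * recently consumed by xe_dma_rx_mbuf().
	 */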
227 	xsc->sc_rx_loaded_idx = 0;
228 	xsc->sc_rx_completed_idx = 0;
229 	xsc->sc_rx_handled_idx = 0;
230 
231 	/* @@@ more NeXT hacks:
232 	 * the 2000 bytes cover at least a 1500-byte MTU plus headers,
233 	 * plus DMA_BEGINALIGNMENT + DMA_ENDALIGNMENT worth of padding.
234 	 */
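	/*
	 * Rough arithmetic (illustrative): 1500 bytes of MTU plus a 14-byte
	 * Ethernet header is 1514 bytes of frame, leaving about 486 bytes of
	 * slack for the machine-dependent begin/end DMA alignment padding.
	 */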
235 	xsc->sc_txbuf = malloc(2000, M_DEVBUF, M_NOWAIT);
236 	if (!xsc->sc_txbuf)
237 		panic("%s: can't malloc tx DMA buffer", sc->sc_dev.dv_xname);
238 
239 	xsc->sc_tx_mb_head = NULL;
240 	xsc->sc_tx_loaded = 0;
241 
242 	mb8795_config(sc, xe_dma_medias, nxe_dma_medias, xe_dma_medias[0]);
243 
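	/*
	 * Hook the transmit and receive interrupt handlers at their
	 * respective interrupt levels and unmask the interrupts.
	 */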
244 	isrlink_autovec(xe_tint, sc, NEXT_I_IPL(NEXT_I_ENETX), 1, NULL);
245 	INTR_ENABLE(NEXT_I_ENETX);
246 	isrlink_autovec(xe_rint, sc, NEXT_I_IPL(NEXT_I_ENETR), 1, NULL);
247 	INTR_ENABLE(NEXT_I_ENETR);
248 }
249 
250 void
251 xe_attach(parent, self, aux)
252 	struct device *parent, *self;
253 	void *aux;
254 {
255 	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
256 	struct xe_softc *xsc = (struct xe_softc *)self;
257 	struct mb8795_softc *sc = &xsc->sc_mb8795;
258 
259 	DPRINTF(("%s: xe_attach()\n",sc->sc_dev.dv_xname));
260 
261 	{
262 		extern u_char rom_enetaddr[6];     /* kludge from machdep.c:next68k_bootargs() */
263 		int i;
264 		for(i=0;i<6;i++) {
265 			sc->sc_enaddr[i] = rom_enetaddr[i];
266 		}
267 	}
268 
269 	printf("\n%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
270 	       sc->sc_dev.dv_xname,
271 	       sc->sc_enaddr[0],sc->sc_enaddr[1],sc->sc_enaddr[2],
272 	       sc->sc_enaddr[3],sc->sc_enaddr[4],sc->sc_enaddr[5]);
273 
274 	xsc->sc_bst = ia->ia_bst;
275 	if (bus_space_map(xsc->sc_bst, NEXT_P_ENET,
276 			  XE_DEVICE_SIZE, 0, &xsc->sc_bsh)) {
277 		panic("\n%s: can't map mb8795 registers\n",
278 		      sc->sc_dev.dv_xname);
279 	}
280 
281 	sc->sc_bmap_bst = ia->ia_bst;
282 	if (bus_space_map(sc->sc_bmap_bst, NEXT_P_BMAP,
283 			  BMAP_SIZE, 0, &sc->sc_bmap_bsh)) {
284 		panic("\n%s: can't map bmap registers\n",
285 		      sc->sc_dev.dv_xname);
286 	}
287 
288 	/*
289 	 * Set up glue for MI code.
290 	 */
291 	sc->sc_glue = &xe_glue;
292 
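	/*
	 * The rest of the setup needs both the "enetx" (transmit) and
	 * "enetr" (receive) DMA channels.  If they have already attached,
	 * finish now; otherwise defer until autoconfiguration is done.
	 */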
293 	xsc->sc_txdma = nextdma_findchannel ("enetx");
294 	xsc->sc_rxdma = nextdma_findchannel ("enetr");
295 	if (xsc->sc_rxdma && xsc->sc_txdma) {
296 		findchannel_defer (self);
297 	} else {
298 		config_defer (self, findchannel_defer);
299 	}
300 
301 	attached = 1;
302 }
303 
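/*
 * Autovectored interrupt handlers: each confirms that its interrupt is
 * pending before dispatching to the MI driver, and returns 1 to claim it.
 */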
304 int
305 xe_tint(arg)
306 	void *arg;
307 {
308 	if (!INTR_OCCURRED(NEXT_I_ENETX))
309 		return 0;
310 	mb8795_tint((struct mb8795_softc *)arg);
311 	return(1);
312 }
313 
314 int
315 xe_rint(arg)
316 	void *arg;
317 {
318 	if (!INTR_OCCURRED(NEXT_I_ENETR))
319 		return(0);
320 	mb8795_rint((struct mb8795_softc *)arg);
321 	return(1);
322 }
323 
324 /*
325  * Glue functions.
326  */
327 
328 u_char
329 xe_read_reg(sc, reg)
330 	struct mb8795_softc *sc;
331 	int reg;
332 {
333 	struct xe_softc *xsc = (struct xe_softc *)sc;
334 
335 	return(bus_space_read_1(xsc->sc_bst, xsc->sc_bsh, reg));
336 }
337 
338 void
339 xe_write_reg(sc, reg, val)
340 	struct mb8795_softc *sc;
341 	int reg;
342 	u_char val;
343 {
344 	struct xe_softc *xsc = (struct xe_softc *)sc;
345 
346 	bus_space_write_1(xsc->sc_bst, xsc->sc_bsh, reg, val);
347 }
348 
349 void
350 xe_dma_reset(sc)
351 	struct mb8795_softc *sc;
352 {
353 	struct xe_softc *xsc = (struct xe_softc *)sc;
354 	int i;
355 
356 	DPRINTF(("xe dma reset\n"));
357 
358 	nextdma_reset(xsc->sc_rxdma);
359 	nextdma_reset(xsc->sc_txdma);
360 
361 	if (xsc->sc_tx_loaded) {
362 		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
363 				0, xsc->sc_tx_dmamap->dm_mapsize,
364 				BUS_DMASYNC_POSTWRITE);
365 		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
366 		xsc->sc_tx_loaded = 0;
367 	}
368 	if (xsc->sc_tx_mb_head) {
369 		m_freem(xsc->sc_tx_mb_head);
370 		xsc->sc_tx_mb_head = NULL;
371 	}
372 
373 	for(i = 0; i < MB8795_NRXBUFS; i++) {
374 		if (xsc->sc_rx_mb_head[i]) {
375 			bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, xsc->sc_rx_dmamap[i]);
376 			m_freem(xsc->sc_rx_mb_head[i]);
377 			xsc->sc_rx_mb_head[i] = NULL;
378 		}
379 	}
380 }
381 
382 void
383 xe_dma_rx_setup (sc)
384 	struct mb8795_softc *sc;
385 {
386 	struct xe_softc *xsc = (struct xe_softc *)sc;
387 	int i;
388 
389 	DPRINTF(("xe dma rx setup\n"));
390 
391 	for(i = 0; i < MB8795_NRXBUFS; i++) {
392 		xsc->sc_rx_mb_head[i] =
393 			xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);
394 	}
395 	xsc->sc_rx_loaded_idx = 0;
396 	xsc->sc_rx_completed_idx = 0;
397 	xsc->sc_rx_handled_idx = 0;
398 
399 	nextdma_init(xsc->sc_rxdma);
400 }
401 
402 void
403 xe_dma_rx_go (sc)
404 	struct mb8795_softc *sc;
405 {
406 	struct xe_softc *xsc = (struct xe_softc *)sc;
407 
408 	DPRINTF(("xe dma rx go\n"));
409 
410 	nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
411 }
412 
413 struct mbuf *
414 xe_dma_rx_mbuf (sc)
415 	struct mb8795_softc *sc;
416 {
417 	struct xe_softc *xsc = (struct xe_softc *)sc;
418 	bus_dmamap_t map;
419 	struct mbuf *m;
420 
421 	m = NULL;
422 	if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
423 		xsc->sc_rx_handled_idx++;
424 		xsc->sc_rx_handled_idx %= MB8795_NRXBUFS;
425 
426 		map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
427 		m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];
428 
429 		m->m_len = map->dm_xfer_len;
430 
431 		bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
432 				0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);
433 
434 		bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);
435 
436 		/* Install a fresh mbuf for next packet */
437 
438 		xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
439 			xe_dma_rxmap_load(sc,map);
440 
441 		/* Punt runt packets; DMA restarts, for example,
442 		 * produce zero-length packets.
443 		 */
444 		if (m->m_len < ETHER_MIN_LEN) {
445 			m_freem(m);
446 			m = NULL;
447 		}
448 	}
449 	return (m);
450 }
451 
452 void
453 xe_dma_tx_setup (sc)
454 	struct mb8795_softc *sc;
455 {
456 	struct xe_softc *xsc = (struct xe_softc *)sc;
457 
458 	DPRINTF(("xe dma tx setup\n"));
459 
460 	nextdma_init(xsc->sc_txdma);
461 }
462 
463 void
464 xe_dma_tx_go (sc)
465 	struct mb8795_softc *sc;
466 {
467 	struct xe_softc *xsc = (struct xe_softc *)sc;
468 
469 	DPRINTF(("xe dma tx go\n"));
470 
471 	nextdma_start(xsc->sc_txdma, DMACSR_SETWRITE);
472 }
473 
474 int
475 xe_dma_tx_mbuf (sc, m)
476 	struct mb8795_softc *sc;
477 	struct mbuf *m;
478 {
479 	struct xe_softc *xsc = (struct xe_softc *)sc;
480 	int error;
481 
482 	xsc->sc_tx_mb_head = m;
483 
484 /* The following is a NeXT-specific hack that should
485  * probably be moved out of the MI code.
486  * This macro assumes it can move the start forward as needed
487  * within the buffer.  Perhaps it should zero the extra bytes.
488  */
489 #define REALIGN_DMABUF(s,l) \
490 	{ (s) = ((u_char *)(((unsigned)(s)+DMA_BEGINALIGNMENT-1) \
491 			&~(DMA_BEGINALIGNMENT-1))); \
492     (l) = ((u_char *)(((unsigned)((s)+(l))+DMA_ENDALIGNMENT-1) \
493 				&~(DMA_ENDALIGNMENT-1)))-(s);}
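/*
 * Illustration with hypothetical values (the real alignments come from the
 * NeXT DMA headers): with a begin alignment of 16 and an end alignment of
 * 32, s = 0x1009 and l = 100 become s = 0x1010 and l = 112; the start is
 * rounded up to the begin alignment and the end (s + l) is rounded up to
 * the end alignment, which is why callers must leave slack around the
 * buffer.
 */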
494 
495 #if 0
496 	error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
497 				     xsc->sc_tx_dmamap, xsc->sc_tx_mb_head, BUS_DMA_NOWAIT);
498 #else
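	/*
	 * Rather than DMA-loading the mbuf chain directly (the disabled code
	 * above), copy it into the contiguous 2000-byte sc_txbuf allocated
	 * in findchannel_defer(), presumably to satisfy the DMA engine's
	 * alignment constraints in one place.
	 */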
499 	{
500 		u_char *buf = xsc->sc_txbuf;
501 		int buflen = 0;
502 
503 		buflen = m->m_pkthdr.len;
504 
505 		/* Pad runt packets up to the minimum frame length.  @@@ memory overrun */
506 		if (buflen < ETHERMIN+sizeof(struct ether_header)) {
507 			buflen = ETHERMIN+sizeof(struct ether_header);
508 		}
509 
510 		{
511 			u_char *p = buf;
512 			for (m=xsc->sc_tx_mb_head; m; m = m->m_next) {
513 				if (m->m_len == 0) continue;
514 				bcopy(mtod(m, u_char *), p, m->m_len);
515 				p += m->m_len;
516 			}
517 		}
518 
519 		error = bus_dmamap_load(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
520 					buf,buflen,NULL,BUS_DMA_NOWAIT);
521 	}
522 #endif
523 	if (error) {
524 		printf("%s: can't load mbuf chain, error = %d\n",
525 		       sc->sc_dev.dv_xname, error);
526 		m_freem(xsc->sc_tx_mb_head);
527 		xsc->sc_tx_mb_head = NULL;
528 		return (error);
529 	}
530 
531 #ifdef DIAGNOSTIC
532 	if (xsc->sc_tx_loaded != 0) {
533 		panic("%s: xsc->sc_tx_loaded is %d",sc->sc_dev.dv_xname,
534 		      xsc->sc_tx_loaded);
535 	}
536 #endif
537 
538 	bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
539 			xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
540 
541 	return (0);
542 }
543 
544 int
545 xe_dma_tx_isactive (sc)
546 	struct mb8795_softc *sc;
547 {
548 	struct xe_softc *xsc = (struct xe_softc *)sc;
549 
550 	return (xsc->sc_tx_loaded != 0);
551 }
552 
553 /****************************************************************/
554 
555 void
556 xe_dma_tx_completed(map, arg)
557 	bus_dmamap_t map;
558 	void *arg;
559 {
560 	struct mb8795_softc *sc = arg;
561 	struct xe_softc *xsc = (struct xe_softc *)sc;
562 
563 	DPRINTF(("%s: xe_dma_tx_completed()\n",sc->sc_dev.dv_xname));
564 
565 #ifdef DIAGNOSTIC
566 	if (!xsc->sc_tx_loaded) {
567 		panic("%s: tx completed never loaded ",sc->sc_dev.dv_xname);
568 	}
569 	if (map != xsc->sc_tx_dmamap) {
570 		panic("%s: unexpected tx completed map",sc->sc_dev.dv_xname);
571 	}
572 
573 #endif
574 }
575 
576 void
577 xe_dma_tx_shutdown(arg)
578 	void *arg;
579 {
580 	struct mb8795_softc *sc = arg;
581 	struct xe_softc *xsc = (struct xe_softc *)sc;
582 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
583 
584 	DPRINTF(("%s: xe_dma_tx_shutdown()\n",sc->sc_dev.dv_xname));
585 
586 #ifdef DIAGNOSTIC
587 	if (!xsc->sc_tx_loaded) {
588 		panic("%s: tx shutdown never loaded ",sc->sc_dev.dv_xname);
589 	}
590 #endif
591 
592 	if (turbo)
593 		MB_WRITE_REG(sc, MB8795_TXMODE, MB8795_TXMODE_TURBO1);
594 	if (xsc->sc_tx_loaded) {
595 		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
596 				0, xsc->sc_tx_dmamap->dm_mapsize,
597 				BUS_DMASYNC_POSTWRITE);
598 		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
599 		m_freem(xsc->sc_tx_mb_head);
600 		xsc->sc_tx_mb_head = NULL;
601 
602 		xsc->sc_tx_loaded--;
603 	}
604 
605 #ifdef DIAGNOSTIC
606 	if (xsc->sc_tx_loaded != 0) {
607 		panic("%s: sc->sc_tx_loaded is %d",sc->sc_dev.dv_xname,
608 		      xsc->sc_tx_loaded);
609 	}
610 #endif
611 
612 	ifp->if_timer = 0;
613 
614 #if 1
615 	if ((ifp->if_flags & IFF_RUNNING) && !IF_IS_EMPTY(&sc->sc_tx_snd)) {
616 		void mb8795_start_dma __P((struct mb8795_softc *)); /* XXXX */
617 		mb8795_start_dma(sc);
618 	}
619 #endif
620 
621 #if 0
622 	/* Enable ready interrupt */
623 	MB_WRITE_REG(sc, MB8795_TXMASK,
624 		     MB_READ_REG(sc, MB8795_TXMASK)
625 		     | MB8795_TXMASK_TXRXIE/* READYIE */);
626 #endif
627 }
628 
629 
630 void
631 xe_dma_rx_completed(map, arg)
632 	bus_dmamap_t map;
633 	void *arg;
634 {
635 	struct mb8795_softc *sc = arg;
636 	struct xe_softc *xsc = (struct xe_softc *)sc;
637 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
638 
639 	if (ifp->if_flags & IFF_RUNNING) {
640 		xsc->sc_rx_completed_idx++;
641 		xsc->sc_rx_completed_idx %= MB8795_NRXBUFS;
642 
643 		DPRINTF(("%s: xe_dma_rx_completed(), sc->sc_rx_completed_idx = %d\n",
644 			 sc->sc_dev.dv_xname, xsc->sc_rx_completed_idx));
645 
646 #if (defined(DIAGNOSTIC))
647 		if (map != xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx]) {
648 			panic("%s: Unexpected rx dmamap completed\n",
649 			      sc->sc_dev.dv_xname);
650 		}
651 #endif
652 	}
653 #ifdef DIAGNOSTIC
654 	else
655 		DPRINTF(("%s: Unexpected rx dmamap completed while if not running\n",
656 			 sc->sc_dev.dv_xname));
657 #endif
658 }
659 
660 void
661 xe_dma_rx_shutdown(arg)
662 	void *arg;
663 {
664 	struct mb8795_softc *sc = arg;
665 	struct xe_softc *xsc = (struct xe_softc *)sc;
666 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
667 
668 	if (ifp->if_flags & IFF_RUNNING) {
669 		DPRINTF(("%s: xe_dma_rx_shutdown(), restarting.\n",
670 			 sc->sc_dev.dv_xname));
671 
672 		nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
673 		if (turbo)
674 			MB_WRITE_REG(sc, MB8795_RXMODE, MB8795_RXMODE_TEST | MB8795_RXMODE_MULTICAST);
675 	}
676 #ifdef DIAGNOSTIC
677 	else
678 		DPRINTF(("%s: Unexpected rx dma shutdown while if not running\n",
679 			 sc->sc_dev.dv_xname));
680 #endif
681 }
682 
683 /*
684  * load a dmamap with a freshly allocated mbuf
685  */
686 struct mbuf *
687 xe_dma_rxmap_load(sc,map)
688 	struct mb8795_softc *sc;
689 	bus_dmamap_t map;
690 {
691 	struct xe_softc *xsc = (struct xe_softc *)sc;
692 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
693 	struct mbuf *m;
694 	int error;
695 
696 	MGETHDR(m, M_DONTWAIT, MT_DATA);
697 	if (m) {
698 		MCLGET(m, M_DONTWAIT);
699 		if ((m->m_flags & M_EXT) == 0) {
700 			m_freem(m);
701 			m = NULL;
702 		} else {
703 			m->m_len = MCLBYTES;
704 		}
705 	}
706 	if (!m) {
707 		/* @@@ Handle this gracefully by reusing a scratch buffer
708 		 * or something.
709 		 */
710 		panic("Unable to get memory for incoming ethernet\n");
711 	}
712 
713 	/* Align the buffer, @@@ NeXT-specific.
714 	 * Perhaps M_ALIGN should be used here instead?
715 	 * First leave ourselves a little room to realign within.
716 	 */
717 	{
718 		u_char *buf = m->m_data;
719 		int buflen = m->m_len;
720 		buflen -= DMA_ENDALIGNMENT+DMA_BEGINALIGNMENT;
721 		REALIGN_DMABUF(buf, buflen);
722 		m->m_data = buf;
723 		m->m_len = buflen;
724 	}
725 
726 	m->m_pkthdr.rcvif = ifp;
727 	m->m_pkthdr.len = m->m_len;
728 
729 	error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat,
730 			map, m, BUS_DMA_NOWAIT);
731 
732 	bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0,
733 			map->dm_mapsize, BUS_DMASYNC_PREREAD);
734 
735 	if (error) {
736 		DPRINTF(("DEBUG: m->m_data = %p, m->m_len = %d\n",
737 				m->m_data, m->m_len));
738 		DPRINTF(("DEBUG: MCLBYTES = %d, map->_dm_size = %ld\n",
739 				MCLBYTES, map->_dm_size));
740 
741 		panic("%s: can't load rx mbuf chain, error = %d\n",
742 				sc->sc_dev.dv_xname, error);
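		/* NOTREACHED */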
743 		m_freem(m);
744 		m = NULL;
745 	}
746 
747 	return(m);
748 }
749 
750 bus_dmamap_t
751 xe_dma_rx_continue(arg)
752 	void *arg;
753 {
754 	struct mb8795_softc *sc = arg;
755 	struct xe_softc *xsc = (struct xe_softc *)sc;
756 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
757 	bus_dmamap_t map = NULL;
758 
759 	if (ifp->if_flags & IFF_RUNNING) {
760 		if (((xsc->sc_rx_loaded_idx+1)%MB8795_NRXBUFS) == xsc->sc_rx_handled_idx) {
761 			/* make space for one packet by dropping one */
762 			struct mbuf *m;
763 			m = xe_dma_rx_mbuf (sc);
764 			if (m)
765 				m_freem(m);
766 #if (defined(DIAGNOSTIC))
767 			DPRINTF(("%s: out of receive DMA buffers\n",sc->sc_dev.dv_xname));
768 #endif
769 		}
770 		xsc->sc_rx_loaded_idx++;
771 		xsc->sc_rx_loaded_idx %= MB8795_NRXBUFS;
772 		map = xsc->sc_rx_dmamap[xsc->sc_rx_loaded_idx];
773 
774 		DPRINTF(("%s: xe_dma_rx_continue() xsc->sc_rx_loaded_idx = %d\n",
775 			 sc->sc_dev.dv_xname,xsc->sc_rx_loaded_idx));
776 	}
777 #ifdef DIAGNOSTIC
778 	else
779 		panic("%s: Unexpected rx dma continue while if not running\n",
780 		      sc->sc_dev.dv_xname);
781 #endif
782 
783 	return(map);
784 }
785 
786 bus_dmamap_t
787 xe_dma_tx_continue(arg)
788 	void *arg;
789 {
790 	struct mb8795_softc *sc = arg;
791 	struct xe_softc *xsc = (struct xe_softc *)sc;
792 	bus_dmamap_t map;
793 
794 	DPRINTF(("%s: xe_dma_tx_continue()\n",sc->sc_dev.dv_xname));
795 
796 	if (xsc->sc_tx_loaded) {
797 		map = NULL;
798 	} else {
799 		map = xsc->sc_tx_dmamap;
800 		xsc->sc_tx_loaded++;
801 	}
802 
803 #ifdef DIAGNOSTIC
804 	if (xsc->sc_tx_loaded != 1) {
805 		panic("%s: sc->sc_tx_loaded is %d",sc->sc_dev.dv_xname,
806 				xsc->sc_tx_loaded);
807 	}
808 #endif
809 
810 	return(map);
811 }
812