/*	$NetBSD: if_xe.c,v 1.20 2010/01/19 22:06:22 pooka Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_xe.c,v 1.20 2010/01/19 22:06:22 pooka Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/bus.h>

#include <next68k/next68k/isr.h>

#include <next68k/dev/mb8795reg.h>
#include <next68k/dev/mb8795var.h>

#include <next68k/dev/bmapreg.h>
#include <next68k/dev/intiovar.h>
#include <next68k/dev/nextdmareg.h>
#include <next68k/dev/nextdmavar.h>

#include <next68k/dev/if_xevar.h>
#include <next68k/dev/if_xereg.h>

#ifdef DEBUG
#define XE_DEBUG
#endif

#ifdef XE_DEBUG
int xe_debug = 0;
#define DPRINTF(x) do { if (xe_debug) printf x; } while (0)
extern char *ndtracep;
extern char ndtrace[];
extern int ndtraceshow;
#define NDTRACEIF(x) if (10 && ndtracep < (ndtrace + 8192)) do {x;} while (0)
#else
#define DPRINTF(x)
#define NDTRACEIF(x)
#endif
#define PRINTF(x) printf x;

extern int turbo;

int	xe_match(struct device *, struct cfdata *, void *);
void	xe_attach(struct device *, struct device *, void *);
int	xe_tint(void *);
int	xe_rint(void *);

struct mbuf *xe_dma_rxmap_load(struct mb8795_softc *, bus_dmamap_t);

bus_dmamap_t xe_dma_rx_continue(void *);
void xe_dma_rx_completed(bus_dmamap_t, void *);
bus_dmamap_t xe_dma_tx_continue(void *);
void xe_dma_tx_completed(bus_dmamap_t, void *);
void xe_dma_rx_shutdown(void *);
void xe_dma_tx_shutdown(void *);

static void	findchannel_defer(struct device *);

CFATTACH_DECL(xe, sizeof(struct xe_softc),
    xe_match, xe_attach, NULL, NULL);

static int xe_dma_medias[] = {
	IFM_ETHER|IFM_AUTO,
	IFM_ETHER|IFM_10_T,
	IFM_ETHER|IFM_10_2,
};
static int nxe_dma_medias = (sizeof(xe_dma_medias)/sizeof(xe_dma_medias[0]));

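/*
 * Only the single on-board controller is supported, so a simple flag
 * is enough to keep a second instance from matching.
 */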
static int attached = 0;

/*
 * Functions and the switch for the MI code.
 */
u_char		xe_read_reg(struct mb8795_softc *, int);
void		xe_write_reg(struct mb8795_softc *, int, u_char);
void		xe_dma_reset(struct mb8795_softc *);
void		xe_dma_rx_setup(struct mb8795_softc *);
void		xe_dma_rx_go(struct mb8795_softc *);
struct mbuf *	xe_dma_rx_mbuf(struct mb8795_softc *);
void		xe_dma_tx_setup(struct mb8795_softc *);
void		xe_dma_tx_go(struct mb8795_softc *);
int		xe_dma_tx_mbuf(struct mb8795_softc *, struct mbuf *);
int		xe_dma_tx_isactive(struct mb8795_softc *);

struct mb8795_glue xe_glue = {
	xe_read_reg,
	xe_write_reg,
	xe_dma_reset,
	xe_dma_rx_setup,
	xe_dma_rx_go,
	xe_dma_rx_mbuf,
	xe_dma_tx_setup,
	xe_dma_tx_go,
	xe_dma_tx_mbuf,
	xe_dma_tx_isactive,
};

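/*
 * The MB8795 lives at a fixed address on the NeXT I/O board, so matching
 * just fills in the base address and succeeds for the first instance.
 */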
int
xe_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached)
		return (0);

	ia->ia_addr = (void *)NEXT_P_ENET;

	return (1);
}

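/*
 * Finish attachment once both the "enetx" (transmit) and "enetr"
 * (receive) DMA channels are available; xe_attach() defers to this
 * routine when the channels have not attached yet.
 */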
static void
findchannel_defer(struct device *self)
{
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;
	int i, error;

	if (!xsc->sc_txdma) {
		xsc->sc_txdma = nextdma_findchannel("enetx");
		if (xsc->sc_txdma == NULL)
			panic("%s: can't find enetx DMA channel",
			    sc->sc_dev.dv_xname);
	}
	if (!xsc->sc_rxdma) {
		xsc->sc_rxdma = nextdma_findchannel("enetr");
		if (xsc->sc_rxdma == NULL)
			panic("%s: can't find enetr DMA channel",
			    sc->sc_dev.dv_xname);
	}
	printf("%s: using DMA channels %s %s\n", sc->sc_dev.dv_xname,
	    xsc->sc_txdma->sc_dev.dv_xname, xsc->sc_rxdma->sc_dev.dv_xname);

	nextdma_setconf(xsc->sc_rxdma, continue_cb, xe_dma_rx_continue);
	nextdma_setconf(xsc->sc_rxdma, completed_cb, xe_dma_rx_completed);
	nextdma_setconf(xsc->sc_rxdma, shutdown_cb, xe_dma_rx_shutdown);
	nextdma_setconf(xsc->sc_rxdma, cb_arg, sc);

	nextdma_setconf(xsc->sc_txdma, continue_cb, xe_dma_tx_continue);
	nextdma_setconf(xsc->sc_txdma, completed_cb, xe_dma_tx_completed);
	nextdma_setconf(xsc->sc_txdma, shutdown_cb, xe_dma_tx_shutdown);
	nextdma_setconf(xsc->sc_txdma, cb_arg, sc);

	/* Initialize the DMA maps. */
	error = bus_dmamap_create(xsc->sc_txdma->sc_dmat, MCLBYTES,
	    (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
	    &xsc->sc_tx_dmamap);
	if (error) {
		panic("%s: can't create tx DMA map, error = %d",
		    sc->sc_dev.dv_xname, error);
	}

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		error = bus_dmamap_create(xsc->sc_rxdma->sc_dmat, MCLBYTES,
		    (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
		    &xsc->sc_rx_dmamap[i]);
		if (error) {
			panic("%s: can't create rx DMA map, error = %d",
			    sc->sc_dev.dv_xname, error);
		}
		xsc->sc_rx_mb_head[i] = NULL;
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	/*
	 * @@@ more next hacks: 2000 bytes covers at least a 1500-byte MTU
	 * plus headers plus DMA_BEGINALIGNMENT + DMA_ENDALIGNMENT slop.
	 */
	xsc->sc_txbuf = malloc(2000, M_DEVBUF, M_NOWAIT);
	if (!xsc->sc_txbuf)
		panic("%s: can't malloc tx DMA buffer", sc->sc_dev.dv_xname);

	xsc->sc_tx_mb_head = NULL;
	xsc->sc_tx_loaded = 0;

	mb8795_config(sc, xe_dma_medias, nxe_dma_medias, xe_dma_medias[0]);

	isrlink_autovec(xe_tint, sc, NEXT_I_IPL(NEXT_I_ENETX), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETX);
	isrlink_autovec(xe_rint, sc, NEXT_I_IPL(NEXT_I_ENETR), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETR);
}

void
xe_attach(struct device *parent, struct device *self, void *aux)
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;

	DPRINTF(("%s: xe_attach()\n", sc->sc_dev.dv_xname));

	{
		/* Kludge from machdep.c:next68k_bootargs(). */
		extern u_char rom_enetaddr[6];
		int i;

		for (i = 0; i < 6; i++)
			sc->sc_enaddr[i] = rom_enetaddr[i];
	}

	printf("\n%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	    sc->sc_dev.dv_xname,
	    sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2],
	    sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5]);

	xsc->sc_bst = ia->ia_bst;
	if (bus_space_map(xsc->sc_bst, NEXT_P_ENET,
	    XE_DEVICE_SIZE, 0, &xsc->sc_bsh)) {
		panic("\n%s: can't map mb8795 registers",
		    sc->sc_dev.dv_xname);
	}

	sc->sc_bmap_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bmap_bst, NEXT_P_BMAP,
	    BMAP_SIZE, 0, &sc->sc_bmap_bsh)) {
		panic("\n%s: can't map bmap registers",
		    sc->sc_dev.dv_xname);
	}

	/*
	 * Set up glue for MI code.
	 */
	sc->sc_glue = &xe_glue;

	xsc->sc_txdma = nextdma_findchannel("enetx");
	xsc->sc_rxdma = nextdma_findchannel("enetr");
	if (xsc->sc_rxdma && xsc->sc_txdma)
		findchannel_defer(self);
	else
		config_defer(self, findchannel_defer);

	attached = 1;
}

int
xe_tint(void *arg)
{
	if (!INTR_OCCURRED(NEXT_I_ENETX))
		return 0;
	mb8795_tint((struct mb8795_softc *)arg);
	return 1;
}

int
xe_rint(void *arg)
{
	if (!INTR_OCCURRED(NEXT_I_ENETR))
		return 0;
	mb8795_rint((struct mb8795_softc *)arg);
	return 1;
}

/*
 * Glue functions.
 */

u_char
xe_read_reg(struct mb8795_softc *sc, int reg)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return bus_space_read_1(xsc->sc_bst, xsc->sc_bsh, reg);
}

void
xe_write_reg(struct mb8795_softc *sc, int reg, u_char val)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	bus_space_write_1(xsc->sc_bst, xsc->sc_bsh, reg, val);
}

void
xe_dma_reset(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA reset\n"));

	nextdma_reset(xsc->sc_rxdma);
	nextdma_reset(xsc->sc_txdma);

	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
		    0, xsc->sc_tx_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		xsc->sc_tx_loaded = 0;
	}
	if (xsc->sc_tx_mb_head) {
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
	}

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		if (xsc->sc_rx_mb_head[i]) {
			bus_dmamap_unload(xsc->sc_rxdma->sc_dmat,
			    xsc->sc_rx_dmamap[i]);
			m_freem(xsc->sc_rx_mb_head[i]);
			xsc->sc_rx_mb_head[i] = NULL;
		}
	}
}

void
xe_dma_rx_setup(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA rx setup\n"));

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		xsc->sc_rx_mb_head[i] =
		    xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	nextdma_init(xsc->sc_rxdma);
}

void
xe_dma_rx_go(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA rx go\n"));

	nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
}

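/*
 * The receive ring is tracked with three indices: sc_rx_loaded_idx is
 * the last map handed to the DMA engine, sc_rx_completed_idx the last
 * map the engine has filled, and sc_rx_handled_idx the last map passed
 * up to the MI layer.  A packet is available whenever handled trails
 * completed.
 */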
struct mbuf *
xe_dma_rx_mbuf(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;
	struct mbuf *m;

	m = NULL;
	if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
		xsc->sc_rx_handled_idx++;
		xsc->sc_rx_handled_idx %= MB8795_NRXBUFS;

		map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
		m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];

		m->m_len = map->dm_xfer_len;

		bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
		    0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);

		/* Install a fresh mbuf for the next packet. */
		xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
		    xe_dma_rxmap_load(sc, map);

		/*
		 * Punt runt packets; DMA restarts create zero-length
		 * packets, for example.
		 */
		if (m->m_len < ETHER_MIN_LEN) {
			m_freem(m);
			m = NULL;
		}
	}
	return (m);
}

void
xe_dma_tx_setup(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA tx setup\n"));

	nextdma_init(xsc->sc_txdma);
}

void
xe_dma_tx_go(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA tx go\n"));

	nextdma_start(xsc->sc_txdma, DMACSR_SETWRITE);
}

int
xe_dma_tx_mbuf(struct mb8795_softc *sc, struct mbuf *m)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int error;

	xsc->sc_tx_mb_head = m;

/*
 * The following is a NeXT-specific hack that should probably be moved
 * out of MI code.  This macro assumes it can move the start pointer
 * forward as needed in the buffer; perhaps it should zero the extra
 * bytes instead.
 */
#define REALIGN_DMABUF(s, l) \
	{ (s) = ((u_char *)(((unsigned)(s)+DMA_BEGINALIGNMENT-1) \
	    &~(DMA_BEGINALIGNMENT-1))); \
	  (l) = ((u_char *)(((unsigned)((s)+(l))+DMA_ENDALIGNMENT-1) \
	    &~(DMA_ENDALIGNMENT-1)))-(s); }
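/*
 * For example, assuming (for illustration only) DMA_BEGINALIGNMENT of
 * 16, a buffer starting at 0x1009 would be rounded up to 0x1010, and
 * the length would then be trimmed so the end of the buffer also falls
 * on an aligned boundary.
 */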

#if 0
	error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
	    xsc->sc_tx_dmamap, xsc->sc_tx_mb_head, BUS_DMA_NOWAIT);
#else
	{
		u_char *buf = xsc->sc_txbuf;
		int buflen = m->m_pkthdr.len;
		u_char *p = buf;

		for (m = xsc->sc_tx_mb_head; m; m = m->m_next) {
			if (m->m_len == 0)
				continue;
			memcpy(p, mtod(m, u_char *), m->m_len);
			p += m->m_len;
		}
		/* Pad runt packets out to the minimum frame size. */
		if (buflen < ETHER_MIN_LEN - ETHER_CRC_LEN) {
			memset(p, 0,
			    ETHER_MIN_LEN - ETHER_CRC_LEN - buflen);
			buflen = ETHER_MIN_LEN - ETHER_CRC_LEN;
		}

		error = bus_dmamap_load(xsc->sc_txdma->sc_dmat,
		    xsc->sc_tx_dmamap, buf, buflen, NULL, BUS_DMA_NOWAIT);
	}
#endif
	if (error) {
		printf("%s: can't load mbuf chain, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
		return (error);
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: xsc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
		    xsc->sc_tx_loaded);
	}
#endif

	bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
	    xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

int
xe_dma_tx_isactive(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return (xsc->sc_tx_loaded != 0);
}

/****************************************************************/

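/*
 * Callbacks invoked by the nextdma engine (registered with
 * nextdma_setconf() above).  sc_tx_loaded counts the single transmit
 * map: it is raised when xe_dma_tx_continue() hands the map to the
 * engine and dropped again in xe_dma_tx_shutdown().
 */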
void
xe_dma_tx_completed(bus_dmamap_t map, void *arg)
{
#if defined(XE_DEBUG) || defined(DIAGNOSTIC)
	struct mb8795_softc *sc = arg;
#endif
#ifdef DIAGNOSTIC
	struct xe_softc *xsc = (struct xe_softc *)sc;
#endif

	DPRINTF(("%s: xe_dma_tx_completed()\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx completed never loaded", sc->sc_dev.dv_xname);
	}
	if (map != xsc->sc_tx_dmamap) {
		panic("%s: unexpected tx completed map", sc->sc_dev.dv_xname);
	}
#endif
}

void
xe_dma_tx_shutdown(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	DPRINTF(("%s: xe_dma_tx_shutdown()\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx shutdown never loaded", sc->sc_dev.dv_xname);
	}
#endif

	if (turbo)
		MB_WRITE_REG(sc, MB8795_TXMODE, MB8795_TXMODE_TURBO1);
	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
		    0, xsc->sc_tx_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;

		xsc->sc_tx_loaded--;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: sc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
		    xsc->sc_tx_loaded);
	}
#endif

	ifp->if_timer = 0;

#if 1
	if ((ifp->if_flags & IFF_RUNNING) && !IF_IS_EMPTY(&sc->sc_tx_snd)) {
		void mb8795_start_dma(struct mb8795_softc *); /* XXXX */
		mb8795_start_dma(sc);
	}
#endif

#if 0
	/* Enable ready interrupt. */
	MB_WRITE_REG(sc, MB8795_TXMASK,
	    MB_READ_REG(sc, MB8795_TXMASK)
	    | MB8795_TXMASK_TXRXIE/* READYIE */);
#endif
}

void
xe_dma_rx_completed(bus_dmamap_t map, void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		xsc->sc_rx_completed_idx++;
		xsc->sc_rx_completed_idx %= MB8795_NRXBUFS;

		DPRINTF(("%s: xe_dma_rx_completed(), sc->sc_rx_completed_idx = %d\n",
		    sc->sc_dev.dv_xname, xsc->sc_rx_completed_idx));

#ifdef DIAGNOSTIC
		if (map != xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx]) {
			panic("%s: Unexpected rx dmamap completed",
			    sc->sc_dev.dv_xname);
		}
#endif
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx dmamap completed while if not running\n",
		    sc->sc_dev.dv_xname));
#endif
}

void
xe_dma_rx_shutdown(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		DPRINTF(("%s: xe_dma_rx_shutdown(), restarting.\n",
		    sc->sc_dev.dv_xname));

		nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
		if (turbo)
			MB_WRITE_REG(sc, MB8795_RXMODE,
			    MB8795_RXMODE_TEST | MB8795_RXMODE_MULTICAST);
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx DMA shutdown while if not running\n",
		    sc->sc_dev.dv_xname));
#endif
}

/*
 * Load a dmamap with a freshly allocated mbuf.
 */
struct mbuf *
xe_dma_rxmap_load(struct mb8795_softc *sc, bus_dmamap_t map)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			m = NULL;
		} else {
			m->m_len = MCLBYTES;
		}
	}
	if (!m) {
		/*
		 * @@@ Handle this gracefully by reusing a scratch buffer
		 * or something.
		 */
		panic("Unable to get memory for incoming ethernet");
	}

	/*
	 * Align the buffer; @@@ NeXT-specific, perhaps M_ALIGN should be
	 * used here instead.  First leave a little room to align with.
	 */
	{
		u_char *buf = m->m_data;
		int buflen = m->m_len;

		buflen -= DMA_ENDALIGNMENT + DMA_BEGINALIGNMENT;
		REALIGN_DMABUF(buf, buflen);
		m->m_data = buf;
		m->m_len = buflen;
	}

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len;

	error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat,
	    map, m, BUS_DMA_NOWAIT);

	bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0,
	    map->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (error) {
		DPRINTF(("DEBUG: m->m_data = %p, m->m_len = %d\n",
		    m->m_data, m->m_len));
		DPRINTF(("DEBUG: MCLBYTES = %d, map->_dm_size = %ld\n",
		    MCLBYTES, map->_dm_size));

		panic("%s: can't load rx mbuf chain, error = %d",
		    sc->sc_dev.dv_xname, error);
		/* NOTREACHED */
	}

	return (m);
}

bus_dmamap_t
xe_dma_rx_continue(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_dmamap_t map = NULL;

	if (ifp->if_flags & IFF_RUNNING) {
		if (((xsc->sc_rx_loaded_idx + 1) % MB8795_NRXBUFS) ==
		    xsc->sc_rx_handled_idx) {
			/* Make space for one packet by dropping one. */
			struct mbuf *m;

			m = xe_dma_rx_mbuf(sc);
			if (m)
				m_freem(m);
#ifdef DIAGNOSTIC
			DPRINTF(("%s: out of receive DMA buffers\n",
			    sc->sc_dev.dv_xname));
#endif
		}
		xsc->sc_rx_loaded_idx++;
		xsc->sc_rx_loaded_idx %= MB8795_NRXBUFS;
		map = xsc->sc_rx_dmamap[xsc->sc_rx_loaded_idx];

		DPRINTF(("%s: xe_dma_rx_continue() xsc->sc_rx_loaded_idx = %d\n",
		    sc->sc_dev.dv_xname, xsc->sc_rx_loaded_idx));
	}
#ifdef DIAGNOSTIC
	else
		panic("%s: Unexpected rx DMA continue while if not running",
		    sc->sc_dev.dv_xname);
#endif

	return (map);
}

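/*
 * Supply the transmit map to the DMA engine.  The single map is handed
 * over at most once per packet; returning NULL tells the engine there
 * is nothing further to chain.
 */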
bus_dmamap_t
xe_dma_tx_continue(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;

	DPRINTF(("%s: xe_dma_tx_continue()\n", sc->sc_dev.dv_xname));

	if (xsc->sc_tx_loaded) {
		map = NULL;
	} else {
		map = xsc->sc_tx_dmamap;
		xsc->sc_tx_loaded++;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 1) {
		panic("%s: sc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
		    xsc->sc_tx_loaded);
	}
#endif

	return (map);
}