xref: /netbsd-src/sys/arch/next68k/dev/if_xe.c (revision 23c8222edbfb0f0932d88a8351d3a0cf817dfb9e)
1 /*	$NetBSD: if_xe.c,v 1.15 2004/08/11 01:23:47 perseant Exp $	*/
2 /*
3  * Copyright (c) 1998 Darrin B. Jewell
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by Darrin B. Jewell
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: if_xe.c,v 1.15 2004/08/11 01:23:47 perseant Exp $");
34 
35 #include "opt_inet.h"
36 #include "bpfilter.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/mbuf.h>
41 #include <sys/syslog.h>
42 #include <sys/socket.h>
43 #include <sys/device.h>
44 
45 #include <net/if.h>
46 #include <net/if_ether.h>
47 #include <net/if_media.h>
48 
49 #ifdef INET
50 #include <netinet/in.h>
51 #include <netinet/if_inarp.h>
52 #endif
53 
54 #include <machine/autoconf.h>
55 #include <machine/cpu.h>
56 #include <machine/intr.h>
57 #include <machine/bus.h>
58 
59 #include <next68k/next68k/isr.h>
60 
61 #include <next68k/dev/mb8795reg.h>
62 #include <next68k/dev/mb8795var.h>
63 
64 #include <next68k/dev/bmapreg.h>
65 #include <next68k/dev/intiovar.h>
66 #include <next68k/dev/nextdmareg.h>
67 #include <next68k/dev/nextdmavar.h>
68 
69 #include <next68k/dev/if_xevar.h>
70 #include <next68k/dev/if_xereg.h>
71 
72 #ifdef DEBUG
73 #define XE_DEBUG
74 #endif
75 
76 #ifdef XE_DEBUG
77 int xe_debug = 0;
78 #define DPRINTF(x) if (xe_debug) printf x;
79 extern char *ndtracep;
80 extern char ndtrace[];
81 extern int ndtraceshow;
82 #define NDTRACEIF(x) if (10 && ndtracep < (ndtrace + 8192)) do {x;} while (0)
83 #else
84 #define DPRINTF(x)
85 #define NDTRACEIF(x)
86 #endif
87 #define PRINTF(x) printf x;
88 
89 extern int turbo;
90 
91 int	xe_match __P((struct device *, struct cfdata *, void *));
92 void	xe_attach __P((struct device *, struct device *, void *));
93 int	xe_tint __P((void *));
94 int	xe_rint __P((void *));
95 
96 struct mbuf * xe_dma_rxmap_load __P((struct mb8795_softc *,
97 		bus_dmamap_t map));
98 
99 bus_dmamap_t xe_dma_rx_continue __P((void *));
100 void xe_dma_rx_completed __P((bus_dmamap_t,void *));
101 bus_dmamap_t xe_dma_tx_continue __P((void *));
102 void xe_dma_tx_completed __P((bus_dmamap_t,void *));
103 void xe_dma_rx_shutdown __P((void *));
104 void xe_dma_tx_shutdown __P((void *));
105 
106 static void	findchannel_defer __P((struct device *));
107 
108 CFATTACH_DECL(xe, sizeof(struct xe_softc),
109     xe_match, xe_attach, NULL, NULL);
110 
/* Media types offered to the MI mb8795 code; the first entry
 * (autoselect) is passed to mb8795_config() as the default. */
static int xe_dma_medias[] = {
	IFM_ETHER|IFM_AUTO,
	IFM_ETHER|IFM_10_T,
	IFM_ETHER|IFM_10_2,
};
static int nxe_dma_medias = (sizeof(xe_dma_medias)/sizeof(xe_dma_medias[0]));

/* Nonzero once a unit has attached; xe_match() uses this to refuse a
 * second probe since there is only one on-board controller. */
static int attached = 0;
119 
120 /*
121  * Functions and the switch for the MI code.
122  */
123 u_char		xe_read_reg __P((struct mb8795_softc *, int));
124 void		xe_write_reg __P((struct mb8795_softc *, int, u_char));
125 void		xe_dma_reset __P((struct mb8795_softc *));
126 void		xe_dma_rx_setup __P((struct mb8795_softc *));
127 void		xe_dma_rx_go __P((struct mb8795_softc *));
128 struct mbuf *	xe_dma_rx_mbuf __P((struct mb8795_softc *));
129 void		xe_dma_tx_setup __P((struct mb8795_softc *));
130 void		xe_dma_tx_go __P((struct mb8795_softc *));
131 int		xe_dma_tx_mbuf __P((struct mb8795_softc *, struct mbuf *));
132 int		xe_dma_tx_isactive __P((struct mb8795_softc *));
133 #if 0
134 int	xe_dma_setup __P((struct mb8795_softc *, caddr_t *,
135 	    size_t *, int, size_t *));
136 void	xe_dma_go __P((struct mb8795_softc *));
137 void	xe_dma_stop __P((struct mb8795_softc *));
138 int	xe_dma_isactive __P((struct mb8795_softc *));
139 #endif
140 
/*
 * Glue switch handed to the MI mb8795 driver (via sc->sc_glue in
 * xe_attach()); routes register access and DMA operations to the
 * next68k-specific implementations below.
 */
struct mb8795_glue xe_glue = {
	xe_read_reg,
	xe_write_reg,
	xe_dma_reset,
	xe_dma_rx_setup,
	xe_dma_rx_go,
	xe_dma_rx_mbuf,
	xe_dma_tx_setup,
	xe_dma_tx_go,
	xe_dma_tx_mbuf,
	xe_dma_tx_isactive,
#if 0
	xe_dma_setup,
	xe_dma_go,
	xe_dma_stop,
	xe_dma_isactive,
	NULL,			/* gl_clear_latched_intr */
#endif
};
160 
161 int
162 xe_match(parent, match, aux)
163 	struct device *parent;
164 	struct cfdata *match;
165 	void *aux;
166 {
167 	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
168 
169 	if (attached)
170 		return (0);
171 
172 	ia->ia_addr = (void *)NEXT_P_ENET;
173 
174 	return (1);
175 }
176 
/*
 * Finish attachment once the "enetx"/"enetr" nextdma channels exist:
 * record the channels, register the DMA callbacks, create the tx/rx
 * DMA maps, allocate the tx bounce buffer, configure the MI mb8795
 * core and hook up the transmit/receive interrupts.  Called directly
 * from xe_attach() when both channels were already found, otherwise
 * deferred via config_defer().
 */
static void
findchannel_defer(self)
	struct device *self;
{
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;
	int i, error;

	/* Look up whichever channel xe_attach() did not find. */
	if (!xsc->sc_txdma) {
		xsc->sc_txdma = nextdma_findchannel ("enetx");
		if (xsc->sc_txdma == NULL)
			panic ("%s: can't find enetx DMA channel",
			       sc->sc_dev.dv_xname);
	}
	if (!xsc->sc_rxdma) {
		xsc->sc_rxdma = nextdma_findchannel ("enetr");
		if (xsc->sc_rxdma == NULL)
			panic ("%s: can't find enetr DMA channel",
			       sc->sc_dev.dv_xname);
	}
	printf ("%s: using DMA channels %s %s\n", sc->sc_dev.dv_xname,
		xsc->sc_txdma->sc_dev.dv_xname, xsc->sc_rxdma->sc_dev.dv_xname);

	/* Register the continue/completed/shutdown callbacks; the
	 * callback argument is the MI softc. */
	nextdma_setconf (xsc->sc_rxdma, continue_cb, xe_dma_rx_continue);
	nextdma_setconf (xsc->sc_rxdma, completed_cb, xe_dma_rx_completed);
	nextdma_setconf (xsc->sc_rxdma, shutdown_cb, xe_dma_rx_shutdown);
	nextdma_setconf (xsc->sc_rxdma, cb_arg, sc);

	nextdma_setconf (xsc->sc_txdma, continue_cb, xe_dma_tx_continue);
	nextdma_setconf (xsc->sc_txdma, completed_cb, xe_dma_tx_completed);
	nextdma_setconf (xsc->sc_txdma, shutdown_cb, xe_dma_tx_shutdown);
	nextdma_setconf (xsc->sc_txdma, cb_arg, sc);

	/* Initialize the DMA maps */
	error = bus_dmamap_create(xsc->sc_txdma->sc_dmat, MCLBYTES,
				  (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
				  &xsc->sc_tx_dmamap);
	if (error) {
		panic("%s: can't create tx DMA map, error = %d",
		      sc->sc_dev.dv_xname, error);
	}

	for(i = 0; i < MB8795_NRXBUFS; i++) {
		error = bus_dmamap_create(xsc->sc_rxdma->sc_dmat, MCLBYTES,
					  (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
					  &xsc->sc_rx_dmamap[i]);
		if (error) {
			panic("%s: can't create rx DMA map, error = %d",
			      sc->sc_dev.dv_xname, error);
		}
		xsc->sc_rx_mb_head[i] = NULL;
	}
	/* rx ring indices (loaded/completed/handled) start in sync. */
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	/* @@@ more next hacks
	 * the  2000 covers at least a 1500 mtu + headers
	 * + DMA_BEGINALIGNMENT+ DMA_ENDALIGNMENT
	 */
	xsc->sc_txbuf = malloc(2000, M_DEVBUF, M_NOWAIT);
	if (!xsc->sc_txbuf)
		panic("%s: can't malloc tx DMA buffer", sc->sc_dev.dv_xname);

	xsc->sc_tx_mb_head = NULL;
	xsc->sc_tx_loaded = 0;

	mb8795_config(sc, xe_dma_medias, nxe_dma_medias, xe_dma_medias[0]);

	/* Install and enable the autovectored tx/rx interrupt handlers. */
	isrlink_autovec(xe_tint, sc, NEXT_I_IPL(NEXT_I_ENETX), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETX);
	isrlink_autovec(xe_rint, sc, NEXT_I_IPL(NEXT_I_ENETR), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETR);
}
251 
/*
 * Attach: fetch the MAC address saved by the boot code, map the
 * mb8795 and BMAP register windows, install the MI glue switch and
 * look for the DMA channels, deferring completion via config_defer()
 * if they have not attached yet.
 */
void
xe_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;

	DPRINTF(("%s: xe_attach()\n",sc->sc_dev.dv_xname));

	/* Copy the station address recovered from the ROM. */
	{
		extern u_char rom_enetaddr[6];     /* kludge from machdep.c:next68k_bootargs() */
		int i;
		for(i=0;i<6;i++) {
			sc->sc_enaddr[i] = rom_enetaddr[i];
		}
	}

	printf("\n%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	       sc->sc_dev.dv_xname,
	       sc->sc_enaddr[0],sc->sc_enaddr[1],sc->sc_enaddr[2],
	       sc->sc_enaddr[3],sc->sc_enaddr[4],sc->sc_enaddr[5]);

	/* Map the mb8795 controller registers. */
	xsc->sc_bst = ia->ia_bst;
	if (bus_space_map(xsc->sc_bst, NEXT_P_ENET,
			  XE_DEVICE_SIZE, 0, &xsc->sc_bsh)) {
		panic("\n%s: can't map mb8795 registers",
		      sc->sc_dev.dv_xname);
	}

	/* Map the BMAP registers used by the MI driver. */
	sc->sc_bmap_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bmap_bst, NEXT_P_BMAP,
			  BMAP_SIZE, 0, &sc->sc_bmap_bsh)) {
		panic("\n%s: can't map bmap registers",
		      sc->sc_dev.dv_xname);
	}

	/*
	 * Set up glue for MI code.
	 */
	sc->sc_glue = &xe_glue;

	/* Finish now if both DMA channels already attached, otherwise
	 * defer until the rest of autoconfiguration is done. */
	xsc->sc_txdma = nextdma_findchannel ("enetx");
	xsc->sc_rxdma = nextdma_findchannel ("enetr");
	if (xsc->sc_rxdma && xsc->sc_txdma) {
		findchannel_defer (self);
	} else {
		config_defer (self, findchannel_defer);
	}

	attached = 1;
}
305 
306 int
307 xe_tint(arg)
308 	void *arg;
309 {
310 	if (!INTR_OCCURRED(NEXT_I_ENETX))
311 		return 0;
312 	mb8795_tint((struct mb8795_softc *)arg);
313 	return(1);
314 }
315 
316 int
317 xe_rint(arg)
318 	void *arg;
319 {
320 	if (!INTR_OCCURRED(NEXT_I_ENETR))
321 		return(0);
322 	mb8795_rint((struct mb8795_softc *)arg);
323 	return(1);
324 }
325 
326 /*
327  * Glue functions.
328  */
329 
330 u_char
331 xe_read_reg(sc, reg)
332 	struct mb8795_softc *sc;
333 	int reg;
334 {
335 	struct xe_softc *xsc = (struct xe_softc *)sc;
336 
337 	return(bus_space_read_1(xsc->sc_bst, xsc->sc_bsh, reg));
338 }
339 
340 void
341 xe_write_reg(sc, reg, val)
342 	struct mb8795_softc *sc;
343 	int reg;
344 	u_char val;
345 {
346 	struct xe_softc *xsc = (struct xe_softc *)sc;
347 
348 	bus_space_write_1(xsc->sc_bst, xsc->sc_bsh, reg, val);
349 }
350 
/*
 * Glue: reset both DMA channels and release any tx/rx buffers still
 * owned by them so the chip can be reinitialized from a clean state.
 */
void
xe_dma_reset(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA reset\n"));

	nextdma_reset(xsc->sc_rxdma);
	nextdma_reset(xsc->sc_txdma);

	/* Unload and drop a pending transmit, if any. */
	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
				0, xsc->sc_tx_dmamap->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		xsc->sc_tx_loaded = 0;
	}
	if (xsc->sc_tx_mb_head) {
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
	}

	/* Free every receive buffer currently loaded in the rx ring. */
	for(i = 0; i < MB8795_NRXBUFS; i++) {
		if (xsc->sc_rx_mb_head[i]) {
			bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, xsc->sc_rx_dmamap[i]);
			m_freem(xsc->sc_rx_mb_head[i]);
			xsc->sc_rx_mb_head[i] = NULL;
		}
	}
}
383 
/*
 * Glue: load a fresh mbuf into every rx DMA map, reset the ring
 * indices and initialize the receive DMA channel.
 */
void
xe_dma_rx_setup (sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA rx setup\n"));

	for(i = 0; i < MB8795_NRXBUFS; i++) {
		xsc->sc_rx_mb_head[i] =
			xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	nextdma_init(xsc->sc_rxdma);
}
403 
404 void
405 xe_dma_rx_go (sc)
406 	struct mb8795_softc *sc;
407 {
408 	struct xe_softc *xsc = (struct xe_softc *)sc;
409 
410 	DPRINTF(("xe DMA rx go\n"));
411 
412 	nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
413 }
414 
/*
 * Glue: hand the next completed receive packet to the MI driver.
 * Returns NULL when no packet is pending or the packet was a runt.
 * The consumed ring slot is immediately reloaded with a fresh mbuf.
 */
struct mbuf *
xe_dma_rx_mbuf (sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;
	struct mbuf *m;

	m = NULL;
	if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
		/* Advance to the next completed ring slot. */
		xsc->sc_rx_handled_idx++;
		xsc->sc_rx_handled_idx %= MB8795_NRXBUFS;

		map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
		m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];

		/* Trim to the byte count the DMA engine transferred. */
		m->m_len = map->dm_xfer_len;

		bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
				0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);

		/* Install a fresh mbuf for next packet */

		xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
			xe_dma_rxmap_load(sc,map);

		/* Punt runt packets
		 * DMA restarts create 0 length packets for example
		 */
		if (m->m_len < ETHER_MIN_LEN) {
			m_freem(m);
			m = NULL;
		}
	}
	return (m);
}
453 
454 void
455 xe_dma_tx_setup (sc)
456 	struct mb8795_softc *sc;
457 {
458 	struct xe_softc *xsc = (struct xe_softc *)sc;
459 
460 	DPRINTF(("xe DMA tx setup\n"));
461 
462 	nextdma_init(xsc->sc_txdma);
463 }
464 
465 void
466 xe_dma_tx_go (sc)
467 	struct mb8795_softc *sc;
468 {
469 	struct xe_softc *xsc = (struct xe_softc *)sc;
470 
471 	DPRINTF(("xe DMA tx go\n"));
472 
473 	nextdma_start(xsc->sc_txdma, DMACSR_SETWRITE);
474 }
475 
/*
 * Glue: prepare an outgoing mbuf chain for transmit DMA.  The chain
 * is flattened into the aligned sc_txbuf bounce buffer (padding runts
 * up to the minimum frame size) and that buffer is loaded into the tx
 * DMA map.  Returns 0 on success or a bus_dma error code; on failure
 * the chain is freed.  On success the chain stays in sc_tx_mb_head
 * until xe_dma_tx_shutdown() releases it.
 */
int
xe_dma_tx_mbuf (sc, m)
	struct mb8795_softc *sc;
	struct mbuf *m;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int error;

	xsc->sc_tx_mb_head = m;

/* The following is a next specific hack that should
 * probably be moved out of MI code.
 * This macro assumes it can move forward as needed
 * in the buffer.  Perhaps it should zero the extra buffer.
 */
#define REALIGN_DMABUF(s,l) \
	{ (s) = ((u_char *)(((unsigned)(s)+DMA_BEGINALIGNMENT-1) \
			&~(DMA_BEGINALIGNMENT-1))); \
    (l) = ((u_char *)(((unsigned)((s)+(l))+DMA_ENDALIGNMENT-1) \
				&~(DMA_ENDALIGNMENT-1)))-(s);}

#if 0
	error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
				     xsc->sc_tx_dmamap, xsc->sc_tx_mb_head, BUS_DMA_NOWAIT);
#else
	{
		u_char *buf = xsc->sc_txbuf;
		int buflen = 0;

		buflen = m->m_pkthdr.len;

		{
			/* Copy the chain into the bounce buffer back to back. */
			u_char *p = buf;
			for (m=xsc->sc_tx_mb_head; m; m = m->m_next) {
				if (m->m_len == 0) continue;
				bcopy(mtod(m, u_char *), p, m->m_len);
				p += m->m_len;
			}
			/* Fix runt packets */
			if (buflen < ETHER_MIN_LEN - ETHER_CRC_LEN) {
				memset(p, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN - buflen);
				buflen = ETHER_MIN_LEN - ETHER_CRC_LEN;
			}
		}

		error = bus_dmamap_load(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
					buf,buflen,NULL,BUS_DMA_NOWAIT);
	}
#endif
	if (error) {
		printf("%s: can't load mbuf chain, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
		return (error);
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: xsc->sc_tx_loaded is %d",sc->sc_dev.dv_xname,
		      xsc->sc_tx_loaded);
	}
#endif

	/* Flush the bounce buffer to memory before the device reads it. */
	bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
			xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}
546 
547 int
548 xe_dma_tx_isactive (sc)
549 	struct mb8795_softc *sc;
550 {
551 	struct xe_softc *xsc = (struct xe_softc *)sc;
552 
553 	return (xsc->sc_tx_loaded != 0);
554 }
555 
556 /****************************************************************/
557 
/*
 * DMA callback: a transmit transfer finished.  All real cleanup
 * happens in xe_dma_tx_shutdown(); this only runs sanity checks.
 */
void
xe_dma_tx_completed(map, arg)
	bus_dmamap_t map;
	void *arg;
{
#if defined (XE_DEBUG) || defined (DIAGNOSTIC)
	struct mb8795_softc *sc = arg;
#endif
#ifdef DIAGNOSTIC
	struct xe_softc *xsc = (struct xe_softc *)sc;
#endif

	DPRINTF(("%s: xe_dma_tx_completed()\n",sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx completed never loaded",sc->sc_dev.dv_xname);
	}
	if (map != xsc->sc_tx_dmamap) {
		panic("%s: unexpected tx completed map",sc->sc_dev.dv_xname);
	}

#endif
}
582 
/*
 * DMA callback: the transmit channel shut down.  Unload the tx map,
 * free the transmitted mbuf chain, cancel the watchdog and, if more
 * packets are queued, kick the MI driver to start the next transfer.
 */
void
xe_dma_tx_shutdown(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	DPRINTF(("%s: xe_dma_tx_shutdown()\n",sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx shutdown never loaded",sc->sc_dev.dv_xname);
	}
#endif

	if (turbo)
		MB_WRITE_REG(sc, MB8795_TXMODE, MB8795_TXMODE_TURBO1);
	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
				0, xsc->sc_tx_dmamap->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;

		xsc->sc_tx_loaded--;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: sc->sc_tx_loaded is %d",sc->sc_dev.dv_xname,
		      xsc->sc_tx_loaded);
	}
#endif

	/* Transmission is done; stop the watchdog timer. */
	ifp->if_timer = 0;

#if 1
	if ((ifp->if_flags & IFF_RUNNING) && !IF_IS_EMPTY(&sc->sc_tx_snd)) {
		void mb8795_start_dma __P((struct mb8795_softc *)); /* XXXX */
		mb8795_start_dma(sc);
	}
#endif

#if 0
	/* Enable ready interrupt */
	MB_WRITE_REG(sc, MB8795_TXMASK,
		     MB_READ_REG(sc, MB8795_TXMASK)
		     | MB8795_TXMASK_TXRXIE/* READYIE */);
#endif
}
635 
636 
/*
 * DMA callback: a receive buffer has been filled.  Advance the
 * completed ring index; the packet itself is collected later by
 * xe_dma_rx_mbuf().
 */
void
xe_dma_rx_completed(map, arg)
	bus_dmamap_t map;
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		xsc->sc_rx_completed_idx++;
		xsc->sc_rx_completed_idx %= MB8795_NRXBUFS;

		DPRINTF(("%s: xe_dma_rx_completed(), sc->sc_rx_completed_idx = %d\n",
			 sc->sc_dev.dv_xname, xsc->sc_rx_completed_idx));

#if (defined(DIAGNOSTIC))
		if (map != xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx]) {
			panic("%s: Unexpected rx dmamap completed",
			      sc->sc_dev.dv_xname);
		}
#endif
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx dmamap completed while if not running\n",
			 sc->sc_dev.dv_xname));
#endif
}
666 
/*
 * DMA callback: the receive channel shut down.  While the interface
 * is running, immediately restart receive DMA so incoming packets are
 * not lost.
 */
void
xe_dma_rx_shutdown(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		DPRINTF(("%s: xe_dma_rx_shutdown(), restarting.\n",
			 sc->sc_dev.dv_xname));

		nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
		if (turbo)
			MB_WRITE_REG(sc, MB8795_RXMODE, MB8795_RXMODE_TEST | MB8795_RXMODE_MULTICAST);
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx DMA shutdown while if not running\n",
			 sc->sc_dev.dv_xname));
#endif
}
689 
690 /*
691  * load a dmamap with a freshly allocated mbuf
692  */
693 struct mbuf *
694 xe_dma_rxmap_load(sc,map)
695 	struct mb8795_softc *sc;
696 	bus_dmamap_t map;
697 {
698 	struct xe_softc *xsc = (struct xe_softc *)sc;
699 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
700 	struct mbuf *m;
701 	int error;
702 
703 	MGETHDR(m, M_DONTWAIT, MT_DATA);
704 	if (m) {
705 		MCLGET(m, M_DONTWAIT);
706 		if ((m->m_flags & M_EXT) == 0) {
707 			m_freem(m);
708 			m = NULL;
709 		} else {
710 			m->m_len = MCLBYTES;
711 		}
712 	}
713 	if (!m) {
714 		/* @@@ Handle this gracefully by reusing a scratch buffer
715 		 * or something.
716 		 */
717 		panic("Unable to get memory for incoming ethernet");
718 	}
719 
720 	/* Align buffer, @@@ next specific.
721 	 * perhaps should be using M_ALIGN here instead?
722 	 * First we give us a little room to align with.
723 	 */
724 	{
725 		u_char *buf = m->m_data;
726 		int buflen = m->m_len;
727 		buflen -= DMA_ENDALIGNMENT+DMA_BEGINALIGNMENT;
728 		REALIGN_DMABUF(buf, buflen);
729 		m->m_data = buf;
730 		m->m_len = buflen;
731 	}
732 
733 	m->m_pkthdr.rcvif = ifp;
734 	m->m_pkthdr.len = m->m_len;
735 
736 	error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat,
737 			map, m, BUS_DMA_NOWAIT);
738 
739 	bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0,
740 			map->dm_mapsize, BUS_DMASYNC_PREREAD);
741 
742 	if (error) {
743 		DPRINTF(("DEBUG: m->m_data = %p, m->m_len = %d\n",
744 				m->m_data, m->m_len));
745 		DPRINTF(("DEBUG: MCLBYTES = %d, map->_dm_size = %ld\n",
746 				MCLBYTES, map->_dm_size));
747 
748 		panic("%s: can't load rx mbuf chain, error = %d",
749 				sc->sc_dev.dv_xname, error);
750 		m_freem(m);
751 		m = NULL;
752 	}
753 
754 	return(m);
755 }
756 
/*
 * DMA callback: supply the next rx map so the channel keeps running.
 * If the ring is full, the oldest pending packet is consumed and
 * dropped to make room.  Returns the map to load next, or NULL when
 * the interface is not running.
 */
bus_dmamap_t
xe_dma_rx_continue(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_dmamap_t map = NULL;

	if (ifp->if_flags & IFF_RUNNING) {
		if (((xsc->sc_rx_loaded_idx+1)%MB8795_NRXBUFS) == xsc->sc_rx_handled_idx) {
			/* make space for one packet by dropping one */
			struct mbuf *m;
			m = xe_dma_rx_mbuf (sc);
			if (m)
				m_freem(m);
#if (defined(DIAGNOSTIC))
			DPRINTF(("%s: out of receive DMA buffers\n",sc->sc_dev.dv_xname));
#endif
		}
		xsc->sc_rx_loaded_idx++;
		xsc->sc_rx_loaded_idx %= MB8795_NRXBUFS;
		map = xsc->sc_rx_dmamap[xsc->sc_rx_loaded_idx];

		DPRINTF(("%s: xe_dma_rx_continue() xsc->sc_rx_loaded_idx = %d\nn",
			 sc->sc_dev.dv_xname,xsc->sc_rx_loaded_idx));
	}
#ifdef DIAGNOSTIC
	else
		panic("%s: Unexpected rx DMA continue while if not running",
		      sc->sc_dev.dv_xname);
#endif

	return(map);
}
792 
/*
 * DMA callback: supply the tx map for the next transfer.  There is
 * only one transmit buffer, so hand it out once (marking it loaded)
 * and return NULL thereafter until xe_dma_tx_shutdown() unloads it.
 */
bus_dmamap_t
xe_dma_tx_continue(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;

	DPRINTF(("%s: xe_dma_tx_continue()\n",sc->sc_dev.dv_xname));

	if (xsc->sc_tx_loaded) {
		map = NULL;
	} else {
		map = xsc->sc_tx_dmamap;
		xsc->sc_tx_loaded++;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 1) {
		panic("%s: sc->sc_tx_loaded is %d",sc->sc_dev.dv_xname,
				xsc->sc_tx_loaded);
	}
#endif

	return(map);
}
819