/*	$NetBSD: if_xe.c,v 1.23 2014/03/25 19:41:32 christos Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_xe.c,v 1.23 2014/03/25 19:41:32 christos Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/bus.h>

#include <next68k/next68k/isr.h>

#include <next68k/dev/mb8795reg.h>
#include <next68k/dev/mb8795var.h>

#include <next68k/dev/bmapreg.h>
#include <next68k/dev/intiovar.h>
#include <next68k/dev/nextdmareg.h>
#include <next68k/dev/nextdmavar.h>

#include <next68k/dev/if_xevar.h>
#include <next68k/dev/if_xereg.h>

#ifdef DEBUG
#define XE_DEBUG
#endif
#ifdef XE_DEBUG
int xe_debug = 0;
#define DPRINTF(x) do { if (xe_debug) printf x; } while (0)
#else
#define DPRINTF(x)
#endif
#define PRINTF(x) printf x

extern int turbo;

int	xe_match(device_t, cfdata_t, void *);
void	xe_attach(device_t, device_t, void *);
int	xe_tint(void *);
int	xe_rint(void *);

struct mbuf *xe_dma_rxmap_load(struct mb8795_softc *, bus_dmamap_t);

bus_dmamap_t xe_dma_rx_continue(void *);
void xe_dma_rx_completed(bus_dmamap_t, void *);
bus_dmamap_t xe_dma_tx_continue(void *);
void xe_dma_tx_completed(bus_dmamap_t, void *);
void xe_dma_rx_shutdown(void *);
void xe_dma_tx_shutdown(void *);

static void	findchannel_defer(device_t);

CFATTACH_DECL_NEW(xe, sizeof(struct xe_softc),
    xe_match, xe_attach, NULL, NULL);

static int xe_dma_medias[] = {
	IFM_ETHER|IFM_AUTO,
	IFM_ETHER|IFM_10_T,
	IFM_ETHER|IFM_10_2,
};
static int nxe_dma_medias = (sizeof(xe_dma_medias)/sizeof(xe_dma_medias[0]));

static int attached = 0;

/*
 * Functions and the switch for the MI code.
 */
u_char		xe_read_reg(struct mb8795_softc *, int);
void		xe_write_reg(struct mb8795_softc *, int, u_char);
void		xe_dma_reset(struct mb8795_softc *);
void		xe_dma_rx_setup(struct mb8795_softc *);
void		xe_dma_rx_go(struct mb8795_softc *);
struct mbuf *	xe_dma_rx_mbuf(struct mb8795_softc *);
void		xe_dma_tx_setup(struct mb8795_softc *);
void		xe_dma_tx_go(struct mb8795_softc *);
int		xe_dma_tx_mbuf(struct mb8795_softc *, struct mbuf *);
int		xe_dma_tx_isactive(struct mb8795_softc *);

struct mb8795_glue xe_glue = {
	xe_read_reg,
	xe_write_reg,
	xe_dma_reset,
	xe_dma_rx_setup,
	xe_dma_rx_go,
	xe_dma_rx_mbuf,
	xe_dma_tx_setup,
	xe_dma_tx_go,
	xe_dma_tx_mbuf,
	xe_dma_tx_isactive,
};
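
/*
 * The MI mb8795 code drives the chip entirely through this switch (it
 * is installed as sc->sc_glue in xe_attach() below), so the
 * NeXT-specific register access and nextdma handling stay in this file.
 */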

int
xe_match(device_t parent, cfdata_t match, void *aux)
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached)
		return (0);

	ia->ia_addr = (void *)NEXT_P_ENET;

	return (1);
}

static void
findchannel_defer(device_t self)
{
	struct xe_softc *xsc = device_private(self);
	struct mb8795_softc *sc = &xsc->sc_mb8795;
	int i, error;

	if (!xsc->sc_txdma) {
		xsc->sc_txdma = nextdma_findchannel("enetx");
		if (xsc->sc_txdma == NULL)
			panic("%s: can't find enetx DMA channel",
			      device_xname(sc->sc_dev));
	}
	if (!xsc->sc_rxdma) {
		xsc->sc_rxdma = nextdma_findchannel("enetr");
		if (xsc->sc_rxdma == NULL)
			panic("%s: can't find enetr DMA channel",
			      device_xname(sc->sc_dev));
	}
	aprint_normal_dev(sc->sc_dev, "using DMA channels %s %s\n",
	    device_xname(xsc->sc_txdma->sc_dev),
	    device_xname(xsc->sc_rxdma->sc_dev));

	nextdma_setconf(xsc->sc_rxdma, continue_cb, xe_dma_rx_continue);
	nextdma_setconf(xsc->sc_rxdma, completed_cb, xe_dma_rx_completed);
	nextdma_setconf(xsc->sc_rxdma, shutdown_cb, xe_dma_rx_shutdown);
	nextdma_setconf(xsc->sc_rxdma, cb_arg, sc);

	nextdma_setconf(xsc->sc_txdma, continue_cb, xe_dma_tx_continue);
	nextdma_setconf(xsc->sc_txdma, completed_cb, xe_dma_tx_completed);
	nextdma_setconf(xsc->sc_txdma, shutdown_cb, xe_dma_tx_shutdown);
	nextdma_setconf(xsc->sc_txdma, cb_arg, sc);

	/* Initialize the DMA maps. */
	error = bus_dmamap_create(xsc->sc_txdma->sc_dmat, MCLBYTES,
				  (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
				  &xsc->sc_tx_dmamap);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't create tx DMA map, error = %d\n", error);
	}

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		error = bus_dmamap_create(xsc->sc_rxdma->sc_dmat, MCLBYTES,
					  (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
					  &xsc->sc_rx_dmamap[i]);
		if (error) {
			panic("%s: can't create rx DMA map, error = %d",
			      device_xname(sc->sc_dev), error);
		}
		xsc->sc_rx_mb_head[i] = NULL;
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	/*
	 * @@@ More NeXT hacks: 2000 bytes cover at least a 1500-byte MTU
	 * plus headers, plus DMA_BEGINALIGNMENT + DMA_ENDALIGNMENT of
	 * slack for the realignment done when the buffer is loaded.
	 */
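	/*
	 * Rough arithmetic (the alignment constants are machine-specific
	 * and live with the nextdma code; the figures here are assumed
	 * magnitudes only): 1500 bytes of payload + 14 bytes of Ethernet
	 * header + 4 bytes of CRC + two alignments of ~32 bytes each
	 * still comes to well under 2000.
	 */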
	xsc->sc_txbuf = malloc(2000, M_DEVBUF, M_NOWAIT);
	if (!xsc->sc_txbuf)
		panic("%s: can't malloc tx DMA buffer",
		    device_xname(sc->sc_dev));

	xsc->sc_tx_mb_head = NULL;
	xsc->sc_tx_loaded = 0;

	mb8795_config(sc, xe_dma_medias, nxe_dma_medias, xe_dma_medias[0]);

	isrlink_autovec(xe_tint, sc, NEXT_I_IPL(NEXT_I_ENETX), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETX);
	isrlink_autovec(xe_rint, sc, NEXT_I_IPL(NEXT_I_ENETR), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETR);
}

void
xe_attach(device_t parent, device_t self, void *aux)
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
	struct xe_softc *xsc = device_private(self);
	struct mb8795_softc *sc = &xsc->sc_mb8795;

	sc->sc_dev = self;
	DPRINTF(("%s: xe_attach()\n", device_xname(self)));

	{
		/* Kludge from machdep.c:next68k_bootargs(). */
		extern u_char rom_enetaddr[6];
		int i;

		for (i = 0; i < 6; i++)
			sc->sc_enaddr[i] = rom_enetaddr[i];
	}

	printf("\n%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	       device_xname(self),
	       sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2],
	       sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5]);

	xsc->sc_bst = ia->ia_bst;
	if (bus_space_map(xsc->sc_bst, NEXT_P_ENET,
			  XE_DEVICE_SIZE, 0, &xsc->sc_bsh)) {
		panic("\n%s: can't map mb8795 registers",
		      device_xname(self));
	}

	sc->sc_bmap_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bmap_bst, NEXT_P_BMAP,
			  BMAP_SIZE, 0, &sc->sc_bmap_bsh)) {
		panic("\n%s: can't map bmap registers",
		      device_xname(self));
	}

	/*
	 * Set up glue for MI code.
	 */
	sc->sc_glue = &xe_glue;

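	/*
	 * The DMA channels are separate devices and may not have attached
	 * yet; if either is missing, defer the rest of the setup via
	 * config_defer() until the remaining devices have attached.
	 */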
	xsc->sc_txdma = nextdma_findchannel("enetx");
	xsc->sc_rxdma = nextdma_findchannel("enetr");
	if (xsc->sc_rxdma && xsc->sc_txdma) {
		findchannel_defer(self);
	} else {
		config_defer(self, findchannel_defer);
	}

	attached = 1;
}

int
xe_tint(void *arg)
{
	if (!INTR_OCCURRED(NEXT_I_ENETX))
		return (0);
	mb8795_tint((struct mb8795_softc *)arg);
	return (1);
}

int
xe_rint(void *arg)
{
	if (!INTR_OCCURRED(NEXT_I_ENETR))
		return (0);
	mb8795_rint((struct mb8795_softc *)arg);
	return (1);
}

/*
 * Glue functions.
 */

u_char
xe_read_reg(struct mb8795_softc *sc, int reg)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return (bus_space_read_1(xsc->sc_bst, xsc->sc_bsh, reg));
}

void
xe_write_reg(struct mb8795_softc *sc, int reg, u_char val)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	bus_space_write_1(xsc->sc_bst, xsc->sc_bsh, reg, val);
}

void
xe_dma_reset(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA reset\n"));

	nextdma_reset(xsc->sc_rxdma);
	nextdma_reset(xsc->sc_txdma);

	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
				0, xsc->sc_tx_dmamap->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		xsc->sc_tx_loaded = 0;
	}
	if (xsc->sc_tx_mb_head) {
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
	}

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		if (xsc->sc_rx_mb_head[i]) {
			bus_dmamap_unload(xsc->sc_rxdma->sc_dmat,
			    xsc->sc_rx_dmamap[i]);
			m_freem(xsc->sc_rx_mb_head[i]);
			xsc->sc_rx_mb_head[i] = NULL;
		}
	}
}

void
xe_dma_rx_setup(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA rx setup\n"));

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		xsc->sc_rx_mb_head[i] =
			xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	nextdma_init(xsc->sc_rxdma);
}

void
xe_dma_rx_go(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA rx go\n"));

	nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
}

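/*
 * The receive side runs on three ring indices modulo MB8795_NRXBUFS:
 * sc_rx_loaded_idx is the last map handed to the DMA engine
 * (xe_dma_rx_continue), sc_rx_completed_idx the last map the engine has
 * filled (xe_dma_rx_completed), and sc_rx_handled_idx the last map
 * consumed here.  A packet is pending exactly while sc_rx_handled_idx
 * trails sc_rx_completed_idx.
 */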
struct mbuf *
xe_dma_rx_mbuf(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;
	struct mbuf *m;

	m = NULL;
	if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
		xsc->sc_rx_handled_idx++;
		xsc->sc_rx_handled_idx %= MB8795_NRXBUFS;

		map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
		m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];

		m->m_len = map->dm_xfer_len;

		bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
				0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);

		/* Install a fresh mbuf for the next packet. */
		xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
			xe_dma_rxmap_load(sc, map);

		/*
		 * Punt runt packets; DMA restarts create zero-length
		 * packets, for example.
		 */
		if (m->m_len < ETHER_MIN_LEN) {
			m_freem(m);
			m = NULL;
		}
	}
	return (m);
}

void
xe_dma_tx_setup(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA tx setup\n"));

	nextdma_init(xsc->sc_txdma);
}

void
xe_dma_tx_go(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA tx go\n"));

	nextdma_start(xsc->sc_txdma, DMACSR_SETWRITE);
}

int
xe_dma_tx_mbuf(struct mb8795_softc *sc, struct mbuf *m)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int error;

	xsc->sc_tx_mb_head = m;

/*
 * The following is a NeXT-specific hack that should probably be moved
 * out of MI code.  The macro assumes it may move the start pointer
 * forward within the buffer as needed; perhaps it should also zero the
 * slack it introduces.
 */
#define REALIGN_DMABUF(s,l) \
	{ (s) = ((u_char *)(((unsigned)(s)+DMA_BEGINALIGNMENT-1) \
			&~(DMA_BEGINALIGNMENT-1))); \
	  (l) = ((u_char *)(((unsigned)((s)+(l))+DMA_ENDALIGNMENT-1) \
			&~(DMA_ENDALIGNMENT-1)))-(s); }
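
/*
 * Worked example with assumed (not the real, machine-specific) values
 * DMA_BEGINALIGNMENT = DMA_ENDALIGNMENT = 16: a buffer at s = 0x10003
 * with l = 100 becomes s = 0x10010, and l is stretched so that
 * s + l = 0x10080 is end-aligned as well, giving l = 112.
 */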

#if 0
	error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
				     xsc->sc_tx_dmamap, xsc->sc_tx_mb_head, BUS_DMA_NOWAIT);
#else
	{
		u_char *buf = xsc->sc_txbuf;
		int buflen = m->m_pkthdr.len;

		{
			u_char *p = buf;
			for (m = xsc->sc_tx_mb_head; m; m = m->m_next) {
				if (m->m_len == 0)
					continue;
				memcpy(p, mtod(m, u_char *), m->m_len);
				p += m->m_len;
			}
			/* Pad runt packets out to the minimum frame size. */
			if (buflen < ETHER_MIN_LEN - ETHER_CRC_LEN) {
				memset(p, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN - buflen);
				buflen = ETHER_MIN_LEN - ETHER_CRC_LEN;
			}
		}

		error = bus_dmamap_load(xsc->sc_txdma->sc_dmat,
		    xsc->sc_tx_dmamap, buf, buflen, NULL, BUS_DMA_NOWAIT);
	}
#endif
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load mbuf chain, error = %d\n", error);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
		return (error);
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: xsc->sc_tx_loaded is %d", device_xname(sc->sc_dev),
		      xsc->sc_tx_loaded);
	}
#endif

	bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
			xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

int
xe_dma_tx_isactive(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return (xsc->sc_tx_loaded != 0);
}
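
/*
 * sc_tx_loaded is both a flag and a depth count for the single tx map:
 * xe_dma_tx_continue() bumps it when it hands sc_tx_dmamap to the DMA
 * engine, and xe_dma_tx_shutdown() drops it when the transfer is torn
 * down, so it is only ever 0 or 1; the DIAGNOSTIC checks below enforce
 * that invariant.
 */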

/****************************************************************/

void
xe_dma_tx_completed(bus_dmamap_t map, void *arg)
{
#if defined(XE_DEBUG) || defined(DIAGNOSTIC)
	struct mb8795_softc *sc = arg;
#endif
#ifdef DIAGNOSTIC
	struct xe_softc *xsc = (struct xe_softc *)sc;
#endif

	DPRINTF(("%s: xe_dma_tx_completed()\n", device_xname(sc->sc_dev)));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx completed never loaded",
		    device_xname(sc->sc_dev));
	}
	if (map != xsc->sc_tx_dmamap) {
		panic("%s: unexpected tx completed map",
		    device_xname(sc->sc_dev));
	}
#endif
}

void
xe_dma_tx_shutdown(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	DPRINTF(("%s: xe_dma_tx_shutdown()\n", device_xname(sc->sc_dev)));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx shutdown never loaded",
		    device_xname(sc->sc_dev));
	}
#endif

	if (turbo)
		MB_WRITE_REG(sc, MB8795_TXMODE, MB8795_TXMODE_TURBO1);
	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
				0, xsc->sc_tx_dmamap->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;

		xsc->sc_tx_loaded--;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: sc->sc_tx_loaded is %d", device_xname(sc->sc_dev),
		      xsc->sc_tx_loaded);
	}
#endif

	ifp->if_timer = 0;

#if 1
	if ((ifp->if_flags & IFF_RUNNING) && !IF_IS_EMPTY(&sc->sc_tx_snd)) {
		void mb8795_start_dma(struct mb8795_softc *); /* XXXX */

		mb8795_start_dma(sc);
	}
#endif

#if 0
	/* Enable ready interrupt. */
	MB_WRITE_REG(sc, MB8795_TXMASK,
		     MB_READ_REG(sc, MB8795_TXMASK)
		     | MB8795_TXMASK_TXRXIE/* READYIE */);
#endif
}

void
xe_dma_rx_completed(bus_dmamap_t map, void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		xsc->sc_rx_completed_idx++;
		xsc->sc_rx_completed_idx %= MB8795_NRXBUFS;

		DPRINTF(("%s: xe_dma_rx_completed(), "
		    "sc->sc_rx_completed_idx = %d\n",
		    device_xname(sc->sc_dev), xsc->sc_rx_completed_idx));

#ifdef DIAGNOSTIC
		if (map != xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx]) {
			panic("%s: Unexpected rx dmamap completed",
			      device_xname(sc->sc_dev));
		}
#endif
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx dmamap completed "
		    "while if not running\n", device_xname(sc->sc_dev)));
#endif
}

void
xe_dma_rx_shutdown(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		DPRINTF(("%s: xe_dma_rx_shutdown(), restarting.\n",
			 device_xname(sc->sc_dev)));

		nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
		if (turbo)
			MB_WRITE_REG(sc, MB8795_RXMODE,
			    MB8795_RXMODE_TEST | MB8795_RXMODE_MULTICAST);
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx DMA shutdown "
		    "while if not running\n", device_xname(sc->sc_dev)));
#endif
}

/*
 * Load a dmamap with a freshly allocated mbuf.
 */
struct mbuf *
xe_dma_rxmap_load(struct mb8795_softc *sc, bus_dmamap_t map)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			m = NULL;
		} else {
			m->m_len = MCLBYTES;
		}
	}
	if (!m) {
		/*
		 * @@@ Handle this gracefully by reusing a scratch buffer
		 * or something.
		 */
		panic("Unable to get memory for incoming ethernet");
	}

	/*
	 * Align the buffer; @@@ NeXT-specific, perhaps M_ALIGN should be
	 * used here instead.  First give ourselves a little room to
	 * align with.
	 */
	{
		u_char *buf = m->m_data;
		int buflen = m->m_len;

		buflen -= DMA_ENDALIGNMENT + DMA_BEGINALIGNMENT;
		REALIGN_DMABUF(buf, buflen);
		m->m_data = buf;
		m->m_len = buflen;
	}

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len;

	error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat,
			map, m, BUS_DMA_NOWAIT);

	bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0,
			map->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (error) {
		DPRINTF(("DEBUG: m->m_data = %p, m->m_len = %d\n",
				m->m_data, m->m_len));
		DPRINTF(("DEBUG: MCLBYTES = %d, map->_dm_size = %ld\n",
				MCLBYTES, map->_dm_size));

		panic("%s: can't load rx mbuf chain, error = %d",
				device_xname(sc->sc_dev), error);
	}

	return (m);
}

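/*
 * If loading the next buffer would overrun the ring (the slot after
 * sc_rx_loaded_idx is the one the driver has not consumed yet), the
 * oldest pending packet is dropped via xe_dma_rx_mbuf() so the DMA
 * engine can keep running.
 */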
bus_dmamap_t
xe_dma_rx_continue(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_dmamap_t map = NULL;

	if (ifp->if_flags & IFF_RUNNING) {
		if (((xsc->sc_rx_loaded_idx+1) % MB8795_NRXBUFS) ==
		    xsc->sc_rx_handled_idx) {
			/* Make space for one packet by dropping one. */
			struct mbuf *m;

			m = xe_dma_rx_mbuf(sc);
			if (m)
				m_freem(m);
#ifdef DIAGNOSTIC
			DPRINTF(("%s: out of receive DMA buffers\n",
			    device_xname(sc->sc_dev)));
#endif
		}
		xsc->sc_rx_loaded_idx++;
		xsc->sc_rx_loaded_idx %= MB8795_NRXBUFS;
		map = xsc->sc_rx_dmamap[xsc->sc_rx_loaded_idx];

		DPRINTF(("%s: xe_dma_rx_continue() xsc->sc_rx_loaded_idx = %d\n",
			 device_xname(sc->sc_dev), xsc->sc_rx_loaded_idx));
	}
#ifdef DIAGNOSTIC
	else
		panic("%s: Unexpected rx DMA continue while if not running",
		      device_xname(sc->sc_dev));
#endif

	return (map);
}

bus_dmamap_t
xe_dma_tx_continue(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;

	DPRINTF(("%s: xe_dma_tx_continue()\n", device_xname(sc->sc_dev)));

	if (xsc->sc_tx_loaded) {
		map = NULL;
	} else {
		map = xsc->sc_tx_dmamap;
		xsc->sc_tx_loaded++;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 1) {
		panic("%s: sc->sc_tx_loaded is %d", device_xname(sc->sc_dev),
		      xsc->sc_tx_loaded);
	}
#endif

	return (map);
}
763