xref: /netbsd-src/sys/arch/next68k/dev/if_xe.c (revision 7330f729ccf0bd976a06f95fad452fe774fc7fd1)
1 /*	$NetBSD: if_xe.c,v 1.25 2019/04/25 08:31:33 msaitoh Exp $	*/
2 /*
3  * Copyright (c) 1998 Darrin B. Jewell
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: if_xe.c,v 1.25 2019/04/25 08:31:33 msaitoh Exp $");
29 
30 #include "opt_inet.h"
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/mbuf.h>
35 #include <sys/syslog.h>
36 #include <sys/socket.h>
37 #include <sys/device.h>
38 
39 #include <net/if.h>
40 #include <net/if_ether.h>
41 #include <net/if_media.h>
42 
43 #ifdef INET
44 #include <netinet/in.h>
45 #include <netinet/if_inarp.h>
46 #endif
47 
48 #include <machine/autoconf.h>
49 #include <machine/cpu.h>
50 #include <machine/intr.h>
51 #include <machine/bus.h>
52 
53 #include <next68k/next68k/isr.h>
54 
55 #include <next68k/dev/mb8795reg.h>
56 #include <next68k/dev/mb8795var.h>
57 
58 #include <next68k/dev/bmapreg.h>
59 #include <next68k/dev/intiovar.h>
60 #include <next68k/dev/nextdmareg.h>
61 #include <next68k/dev/nextdmavar.h>
62 
63 #include <next68k/dev/if_xevar.h>
64 #include <next68k/dev/if_xereg.h>
65 
#ifdef DEBUG
#define XE_DEBUG
#endif

/*
 * Debug printf, gated at runtime by xe_debug when built with DEBUG.
 * Wrapped in do/while(0) so a call is a single statement: the old
 * "if (xe_debug) printf x;" form silently captured a following "else"
 * (dangling-else hazard) when used in an unbraced if/else.
 */
#ifdef XE_DEBUG
int xe_debug = 0;
#define DPRINTF(x) do { if (xe_debug) printf x; } while (0)
#else
#define DPRINTF(x) do { } while (0)
#endif
#define PRINTF(x) printf x
77 
extern int turbo;	/* machdep flag; selects turbo-model register writes below -- see machdep */

/* autoconfiguration entry points */
int	xe_match(device_t, cfdata_t, void *);
void	xe_attach(device_t, device_t, void *);
/* autovectored hardware interrupt handlers (transmit / receive) */
int	xe_tint(void *);
int	xe_rint(void *);

/* Allocate a fresh cluster mbuf and load it into a receive DMA map. */
struct mbuf *xe_dma_rxmap_load(struct mb8795_softc *, bus_dmamap_t);

/* Callbacks invoked by the nextdma engine (see nextdma_setconf below). */
bus_dmamap_t xe_dma_rx_continue(void *);
void	xe_dma_rx_completed(bus_dmamap_t, void *);
bus_dmamap_t xe_dma_tx_continue(void *);
void	xe_dma_tx_completed(bus_dmamap_t, void *);
void	xe_dma_rx_shutdown(void *);
void	xe_dma_tx_shutdown(void *);

static void findchannel_defer(device_t);

CFATTACH_DECL_NEW(xe, sizeof(struct xe_softc),
    xe_match, xe_attach, NULL, NULL);

/* Media types advertised to the MI code (passed to mb8795_config()). */
static int xe_dma_medias[] = {
	IFM_ETHER | IFM_AUTO,
	IFM_ETHER | IFM_10_T,
	IFM_ETHER | IFM_10_2,
};
static int nxe_dma_medias = __arraycount(xe_dma_medias);

/* Only one instance may attach; xe_match() fails once this is set. */
static int attached = 0;
107 
/*
 * Functions and the switch for the MI code.
 */
u_char		xe_read_reg(struct mb8795_softc *, int);
void		xe_write_reg(struct mb8795_softc *, int, u_char);
void		xe_dma_reset(struct mb8795_softc *);
void		xe_dma_rx_setup(struct mb8795_softc *);
void		xe_dma_rx_go(struct mb8795_softc *);
struct mbuf *	xe_dma_rx_mbuf(struct mb8795_softc *);
void		xe_dma_tx_setup(struct mb8795_softc *);
void		xe_dma_tx_go(struct mb8795_softc *);
int		xe_dma_tx_mbuf(struct mb8795_softc *, struct mbuf *);
int		xe_dma_tx_isactive(struct mb8795_softc *);

/* Glue table handed to the MI mb8795 driver; field order per mb8795var.h. */
struct mb8795_glue xe_glue = {
	xe_read_reg,
	xe_write_reg,
	xe_dma_reset,
	xe_dma_rx_setup,
	xe_dma_rx_go,
	xe_dma_rx_mbuf,
	xe_dma_tx_setup,
	xe_dma_tx_go,
	xe_dma_tx_mbuf,
	xe_dma_tx_isactive,
};
134 
135 int
136 xe_match(device_t parent, cfdata_t match, void *aux)
137 {
138 	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
139 
140 	if (attached)
141 		return 0;
142 
143 	ia->ia_addr = (void *)NEXT_P_ENET;
144 
145 	return 1;
146 }
147 
/*
 * Finish attachment once both DMA channels ("enetx" transmit, "enetr"
 * receive) exist.  Called directly from xe_attach() when the channels
 * were already found, otherwise deferred via config_defer() until the
 * nextdma driver has attached them.  Panics on any unrecoverable
 * resource failure.
 */
static void
findchannel_defer(device_t self)
{
	struct xe_softc *xsc = device_private(self);
	struct mb8795_softc *sc = &xsc->sc_mb8795;
	int i, error;

	/* Re-try the channel lookups xe_attach() may have failed. */
	if (!xsc->sc_txdma) {
		xsc->sc_txdma = nextdma_findchannel ("enetx");
		if (xsc->sc_txdma == NULL)
			panic("%s: can't find enetx DMA channel",
			       device_xname(sc->sc_dev));
	}
	if (!xsc->sc_rxdma) {
		xsc->sc_rxdma = nextdma_findchannel ("enetr");
		if (xsc->sc_rxdma == NULL)
			panic ("%s: can't find enetr DMA channel",
			       device_xname(sc->sc_dev));
	}
	aprint_normal_dev(sc->sc_dev, "using DMA channels %s %s\n",
		device_xname(xsc->sc_txdma->sc_dev),
		device_xname(xsc->sc_rxdma->sc_dev));

	/* Register our callbacks with both channels; cb_arg is the MI softc. */
	nextdma_setconf (xsc->sc_rxdma, continue_cb, xe_dma_rx_continue);
	nextdma_setconf (xsc->sc_rxdma, completed_cb, xe_dma_rx_completed);
	nextdma_setconf (xsc->sc_rxdma, shutdown_cb, xe_dma_rx_shutdown);
	nextdma_setconf (xsc->sc_rxdma, cb_arg, sc);

	nextdma_setconf (xsc->sc_txdma, continue_cb, xe_dma_tx_continue);
	nextdma_setconf (xsc->sc_txdma, completed_cb, xe_dma_tx_completed);
	nextdma_setconf (xsc->sc_txdma, shutdown_cb, xe_dma_tx_shutdown);
	nextdma_setconf (xsc->sc_txdma, cb_arg, sc);

	/* Initialize the DMA maps */
	error = bus_dmamap_create(xsc->sc_txdma->sc_dmat, MCLBYTES,
	    (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
	    &xsc->sc_tx_dmamap);
	if (error) {
		/* NOTE(review): only reported; attach continues anyway. */
		aprint_error_dev(sc->sc_dev,
		    "can't create tx DMA map, error = %d", error);
	}

	for(i = 0; i < MB8795_NRXBUFS; i++) {
		error = bus_dmamap_create(xsc->sc_rxdma->sc_dmat, MCLBYTES,
		    (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
		    &xsc->sc_rx_dmamap[i]);
		if (error) {
			panic("%s: can't create rx DMA map, error = %d",
			      device_xname(sc->sc_dev), error);
		}
		xsc->sc_rx_mb_head[i] = NULL;
	}
	/* Receive ring starts out empty; mbufs are loaded in rx_setup. */
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	/* @@@ more next hacks
	 * the  2000 covers at least a 1500 mtu + headers
	 * + DMA_BEGINALIGNMENT+ DMA_ENDALIGNMENT
	 */
	xsc->sc_txbuf = malloc(2000, M_DEVBUF, M_NOWAIT);
	if (!xsc->sc_txbuf)
		panic("%s: can't malloc tx DMA buffer",
		    device_xname(sc->sc_dev));

	xsc->sc_tx_mb_head = NULL;
	xsc->sc_tx_loaded = 0;

	/* Let the MI mb8795 code finish configuring the interface. */
	mb8795_config(sc, xe_dma_medias, nxe_dma_medias, xe_dma_medias[0]);

	/* Hook up and enable the transmit and receive interrupts. */
	isrlink_autovec(xe_tint, sc, NEXT_I_IPL(NEXT_I_ENETX), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETX);
	isrlink_autovec(xe_rint, sc, NEXT_I_IPL(NEXT_I_ENETR), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETR);
}
223 
/*
 * Attach.  Copies the MAC address saved from the ROM by machdep code,
 * maps the mb8795 and BMAP register windows, installs the MI glue
 * switch, and completes setup via findchannel_defer() -- immediately
 * if both DMA channels already exist, otherwise deferred until the
 * nextdma driver has attached.
 */
void
xe_attach(device_t parent, device_t self, void *aux)
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
	struct xe_softc *xsc = device_private(self);
	struct mb8795_softc *sc = &xsc->sc_mb8795;

	sc->sc_dev = self;
	DPRINTF(("%s: xe_attach()\n", device_xname(self)));

	{
		/* kludge from machdep.c:next68k_bootargs() */
		extern u_char rom_enetaddr[6];
		int i;

		for (i = 0; i < 6; i++)
			sc->sc_enaddr[i] = rom_enetaddr[i];
	}

	printf("\n%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	       device_xname(self),
	       sc->sc_enaddr[0],sc->sc_enaddr[1],sc->sc_enaddr[2],
	       sc->sc_enaddr[3],sc->sc_enaddr[4],sc->sc_enaddr[5]);

	/* Map the mb8795 Ethernet controller registers. */
	xsc->sc_bst = ia->ia_bst;
	if (bus_space_map(xsc->sc_bst, NEXT_P_ENET,
			  XE_DEVICE_SIZE, 0, &xsc->sc_bsh)) {
		panic("\n%s: can't map mb8795 registers",
		      device_xname(self));
	}

	/* Map the BMAP registers, accessed by the MI driver. */
	sc->sc_bmap_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bmap_bst, NEXT_P_BMAP,
	    BMAP_SIZE, 0, &sc->sc_bmap_bsh))
		panic("\n%s: can't map bmap registers", device_xname(self));

	/* Set up glue for MI code. */
	sc->sc_glue = &xe_glue;

	/* Finish now if the DMA channels exist, else wait for nextdma. */
	xsc->sc_txdma = nextdma_findchannel("enetx");
	xsc->sc_rxdma = nextdma_findchannel("enetr");
	if (xsc->sc_rxdma && xsc->sc_txdma)
		findchannel_defer(self);
	else
		config_defer(self, findchannel_defer);

	attached = 1;
}
272 
273 int
274 xe_tint(void *arg)
275 {
276 	if (!INTR_OCCURRED(NEXT_I_ENETX))
277 		return 0;
278 	mb8795_tint((struct mb8795_softc *)arg);
279 	return 1;
280 }
281 
282 int
283 xe_rint(void *arg)
284 {
285 	if (!INTR_OCCURRED(NEXT_I_ENETR))
286 		return 0;
287 	mb8795_rint((struct mb8795_softc *)arg);
288 	return 1;
289 }
290 
291 /*
292  * Glue functions.
293  */
294 
295 u_char
296 xe_read_reg(struct mb8795_softc *sc, int reg)
297 {
298 	struct xe_softc *xsc = (struct xe_softc *)sc;
299 
300 	return bus_space_read_1(xsc->sc_bst, xsc->sc_bsh, reg);
301 }
302 
303 void
304 xe_write_reg(struct mb8795_softc *sc, int reg, u_char val)
305 {
306 	struct xe_softc *xsc = (struct xe_softc *)sc;
307 
308 	bus_space_write_1(xsc->sc_bst, xsc->sc_bsh, reg, val);
309 }
310 
/*
 * Glue: reset both DMA channels and release every buffer still owned
 * by DMA -- the in-flight transmit mapping/chain and all loaded
 * receive mbufs.
 */
void
xe_dma_reset(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA reset\n"));

	nextdma_reset(xsc->sc_rxdma);
	nextdma_reset(xsc->sc_txdma);

	/* Tear down a transmit mapping that never completed. */
	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
		    0, xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		xsc->sc_tx_loaded = 0;
	}
	if (xsc->sc_tx_mb_head) {
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
	}

	/* Unload and free every mbuf in the receive ring. */
	for(i = 0; i < MB8795_NRXBUFS; i++) {
		if (xsc->sc_rx_mb_head[i]) {
			bus_dmamap_unload(xsc->sc_rxdma->sc_dmat,
			    xsc->sc_rx_dmamap[i]);
			m_freem(xsc->sc_rx_mb_head[i]);
			xsc->sc_rx_mb_head[i] = NULL;
		}
	}
}
342 
343 void
344 xe_dma_rx_setup(struct mb8795_softc *sc)
345 {
346 	struct xe_softc *xsc = (struct xe_softc *)sc;
347 	int i;
348 
349 	DPRINTF(("xe DMA rx setup\n"));
350 
351 	for(i = 0; i < MB8795_NRXBUFS; i++)
352 		xsc->sc_rx_mb_head[i] =
353 			xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);
354 
355 	xsc->sc_rx_loaded_idx = 0;
356 	xsc->sc_rx_completed_idx = 0;
357 	xsc->sc_rx_handled_idx = 0;
358 
359 	nextdma_init(xsc->sc_rxdma);
360 }
361 
362 void
363 xe_dma_rx_go(struct mb8795_softc *sc)
364 {
365 	struct xe_softc *xsc = (struct xe_softc *)sc;
366 
367 	DPRINTF(("xe DMA rx go\n"));
368 
369 	nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
370 }
371 
/*
 * Glue: hand the next completed receive packet to the MI code.
 *
 * The receive ring is tracked by three indices: loaded (slots given to
 * the DMA engine), completed (slots the engine has filled) and handled
 * (slots consumed here).  Returns NULL when nothing is waiting or the
 * packet was a runt; otherwise returns the filled mbuf after
 * installing a fresh one in its ring slot.
 */
struct mbuf *
xe_dma_rx_mbuf(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;
	struct mbuf *m;

	m = NULL;
	if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
		/* Advance to the next completed slot; the ring wraps. */
		xsc->sc_rx_handled_idx++;
		xsc->sc_rx_handled_idx %= MB8795_NRXBUFS;

		map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
		m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];

		/* Received length as recorded by the DMA engine. */
		m->m_len = map->dm_xfer_len;

		bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
				0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);

		/* Install a fresh mbuf for next packet */

		xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
			xe_dma_rxmap_load(sc,map);

		/* Punt runt packets
		 * DMA restarts create 0 length packets for example
		 */
		if (m->m_len < ETHER_MIN_LEN) {
			m_freem(m);
			m = NULL;
		}
	}
	return m;
}
409 
410 void
411 xe_dma_tx_setup(struct mb8795_softc *sc)
412 {
413 	struct xe_softc *xsc = (struct xe_softc *)sc;
414 
415 	DPRINTF(("xe DMA tx setup\n"));
416 
417 	nextdma_init(xsc->sc_txdma);
418 }
419 
420 void
421 xe_dma_tx_go(struct mb8795_softc *sc)
422 {
423 	struct xe_softc *xsc = (struct xe_softc *)sc;
424 
425 	DPRINTF(("xe DMA tx go\n"));
426 
427 	nextdma_start(xsc->sc_txdma, DMACSR_SETWRITE);
428 }
429 
/*
 * Glue: queue an mbuf chain for transmission.
 *
 * The chain is flattened into the pre-allocated, DMA-aligned bounce
 * buffer (sc_txbuf, sized in findchannel_defer()), zero-padded up to
 * the minimum Ethernet frame size, and the single tx DMA map is loaded
 * from that buffer.  Returns 0 on success; on a load error the chain
 * is freed and the error is returned.  The chain itself is kept in
 * sc_tx_mb_head until xe_dma_tx_shutdown() frees it.
 */
int
xe_dma_tx_mbuf(struct mb8795_softc *sc, struct mbuf *m)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int error;

	xsc->sc_tx_mb_head = m;

/* The following is a next specific hack that should
 * probably be moved out of MI code.
 * This macro assumes it can move forward as needed
 * in the buffer.  Perhaps it should zero the extra buffer.
 */
#define REALIGN_DMABUF(s,l) \
	{ (s) = ((u_char *)(((unsigned)(s)+DMA_BEGINALIGNMENT-1) \
			&~(DMA_BEGINALIGNMENT-1))); \
    (l) = ((u_char *)(((unsigned)((s)+(l))+DMA_ENDALIGNMENT-1) \
				&~(DMA_ENDALIGNMENT-1)))-(s);}

#if 0
	error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
	    xsc->sc_tx_dmamap, xsc->sc_tx_mb_head, BUS_DMA_NOWAIT);
#else
	{
		u_char *buf = xsc->sc_txbuf;
		int buflen = 0;

		buflen = m->m_pkthdr.len;

		{
			/* Flatten the chain into the bounce buffer. */
			u_char *p = buf;
			for (m=xsc->sc_tx_mb_head; m; m = m->m_next) {
				if (m->m_len == 0) continue;
				memcpy(p, mtod(m, u_char *), m->m_len);
				p += m->m_len;
			}
			/* Fix runt packets */
			if (buflen < ETHER_MIN_LEN - ETHER_CRC_LEN) {
				memset(p, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN - buflen);
				buflen = ETHER_MIN_LEN - ETHER_CRC_LEN;
			}
		}

		error = bus_dmamap_load(xsc->sc_txdma->sc_dmat,
		    xsc->sc_tx_dmamap, buf, buflen, NULL, BUS_DMA_NOWAIT);
	}
#endif
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load mbuf chain, error = %d\n", error);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
		return error;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: xsc->sc_tx_loaded is %d", device_xname(sc->sc_dev),
		      xsc->sc_tx_loaded);
	}
#endif

	bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
			xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}
498 
499 int
500 xe_dma_tx_isactive(struct mb8795_softc *sc)
501 {
502 	struct xe_softc *xsc = (struct xe_softc *)sc;
503 
504 	return (xsc->sc_tx_loaded != 0);
505 }
506 
507 /****************************************************************/
508 
/*
 * nextdma "completed" callback for transmit: the tx map has finished
 * transferring.  Diagnostics only -- the actual unload/free happens in
 * xe_dma_tx_shutdown().  The locals are ifdef-guarded to avoid
 * unused-variable warnings in non-debug builds.
 */
void
xe_dma_tx_completed(bus_dmamap_t map, void *arg)
{
#if defined (XE_DEBUG) || defined (DIAGNOSTIC)
	struct mb8795_softc *sc = arg;
#endif
#ifdef DIAGNOSTIC
	struct xe_softc *xsc = (struct xe_softc *)sc;
#endif

	DPRINTF(("%s: xe_dma_tx_completed()\n", device_xname(sc->sc_dev)));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded)
		panic("%s: tx completed never loaded",
		    device_xname(sc->sc_dev));

	if (map != xsc->sc_tx_dmamap)
		panic("%s: unexpected tx completed map",
		    device_xname(sc->sc_dev));

#endif
}
532 
/*
 * nextdma "shutdown" callback for transmit: the channel has stopped.
 * Sync and unload the tx map, free the transmitted chain, clear the
 * watchdog timer, and restart output if more packets are queued.
 */
void
xe_dma_tx_shutdown(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	DPRINTF(("%s: xe_dma_tx_shutdown()\n", device_xname(sc->sc_dev)));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded)
		panic("%s: tx shutdown never loaded",
		    device_xname(sc->sc_dev));
#endif

	/* Turbo machines need the tx mode re-set after each transmit. */
	if (turbo)
		MB_WRITE_REG(sc, MB8795_TXMODE, MB8795_TXMODE_TURBO1);
	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
		    0, xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;

		xsc->sc_tx_loaded--;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0)
		panic("%s: sc->sc_tx_loaded is %d", device_xname(sc->sc_dev),
		      xsc->sc_tx_loaded);
#endif

	ifp->if_timer = 0;

#if 1
	/* Start the next queued packet, if any. */
	if ((ifp->if_flags & IFF_RUNNING) && !IF_IS_EMPTY(&sc->sc_tx_snd)) {
		void mb8795_start_dma(struct mb8795_softc *); /* XXXX */
		mb8795_start_dma(sc);
	}
#endif

#if 0
	/* Enable ready interrupt */
	MB_WRITE_REG(sc, MB8795_TXMASK,
		     MB_READ_REG(sc, MB8795_TXMASK)
		     | MB8795_TXMASK_TXRXIE/* READYIE */);
#endif
}
582 
583 
/*
 * nextdma "completed" callback for receive: one receive map has been
 * filled.  Advance the completed index so xe_dma_rx_mbuf() will pick
 * the packet up; ignored (with a debug note) if the interface is down.
 */
void
xe_dma_rx_completed(bus_dmamap_t map, void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Ring index of the slot that just filled (wraps). */
		xsc->sc_rx_completed_idx++;
		xsc->sc_rx_completed_idx %= MB8795_NRXBUFS;

		DPRINTF(("%s: xe_dma_rx_completed(), "
			"sc->sc_rx_completed_idx = %d\n",
			 device_xname(sc->sc_dev), xsc->sc_rx_completed_idx));

#if (defined(DIAGNOSTIC))
		if (map != xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx])
			panic("%s: Unexpected rx dmamap completed",
			      device_xname(sc->sc_dev));
#endif
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx dmamap completed while if not "
			"running\n", device_xname(sc->sc_dev)));
#endif
}
611 
/*
 * nextdma "shutdown" callback for receive: the channel stopped.  If
 * the interface is still running, immediately restart receive DMA
 * (and re-arm the turbo rx mode).
 */
void
xe_dma_rx_shutdown(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		DPRINTF(("%s: xe_dma_rx_shutdown(), restarting.\n",
			 device_xname(sc->sc_dev)));

		nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
		if (turbo)
			MB_WRITE_REG(sc, MB8795_RXMODE,
			    MB8795_RXMODE_TEST | MB8795_RXMODE_MULTICAST);
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx DMA shutdown while if not "
			"running\n", device_xname(sc->sc_dev)));
#endif
}
634 
635 /*
636  * load a dmamap with a freshly allocated mbuf
637  */
638 struct mbuf *
639 xe_dma_rxmap_load(struct mb8795_softc *sc, bus_dmamap_t map)
640 {
641 	struct xe_softc *xsc = (struct xe_softc *)sc;
642 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
643 	struct mbuf *m;
644 	int error;
645 
646 	MGETHDR(m, M_DONTWAIT, MT_DATA);
647 	if (m) {
648 		MCLGET(m, M_DONTWAIT);
649 		if ((m->m_flags & M_EXT) == 0) {
650 			m_freem(m);
651 			m = NULL;
652 		} else
653 			m->m_len = MCLBYTES;
654 	}
655 	if (!m) {
656 		/*
657 		 * @@@ Handle this gracefully by reusing a scratch buffer
658 		 * or something.
659 		 */
660 		panic("Unable to get memory for incoming ethernet");
661 	}
662 
663 	/*
664 	 * Align buffer, @@@ next specific.
665 	 * perhaps should be using M_ALIGN here instead?
666 	 * First we give us a little room to align with.
667 	 */
668 	{
669 		u_char *buf = m->m_data;
670 		int buflen = m->m_len;
671 		buflen -= DMA_ENDALIGNMENT+DMA_BEGINALIGNMENT;
672 		REALIGN_DMABUF(buf, buflen);
673 		m->m_data = buf;
674 		m->m_len = buflen;
675 	}
676 
677 	m_set_rcvif(m, ifp);
678 	m->m_pkthdr.len = m->m_len;
679 
680 	error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat,
681 			map, m, BUS_DMA_NOWAIT);
682 
683 	bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0,
684 			map->dm_mapsize, BUS_DMASYNC_PREREAD);
685 
686 	if (error) {
687 		DPRINTF(("DEBUG: m->m_data = %p, m->m_len = %d\n",
688 				m->m_data, m->m_len));
689 		DPRINTF(("DEBUG: MCLBYTES = %d, map->_dm_size = %ld\n",
690 				MCLBYTES, map->_dm_size));
691 
692 		panic("%s: can't load rx mbuf chain, error = %d",
693 				device_xname(sc->sc_dev), error);
694 		m_freem(m);
695 		m = NULL;
696 	}
697 
698 	return m;
699 }
700 
/*
 * nextdma "continue" callback for receive: supply the next receive map
 * to the DMA engine.  If the ring is about to catch up with the
 * handled index (i.e. it is full), the oldest completed packet is
 * dropped to make room.  Returns NULL only in the (diagnostic-panic)
 * case of being called while the interface is down.
 */
bus_dmamap_t
xe_dma_rx_continue(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_dmamap_t map = NULL;

	if (ifp->if_flags & IFF_RUNNING) {
		if (((xsc->sc_rx_loaded_idx+1)%MB8795_NRXBUFS)
		    == xsc->sc_rx_handled_idx) {
			/* Make space for one packet by dropping one */
			struct mbuf *m;
			m = xe_dma_rx_mbuf (sc);
			if (m)
				m_freem(m);
#if (defined(DIAGNOSTIC))
			DPRINTF(("%s: out of receive DMA buffers\n",
				device_xname(sc->sc_dev)));
#endif
		}
		/* Hand out the next ring slot (wraps). */
		xsc->sc_rx_loaded_idx++;
		xsc->sc_rx_loaded_idx %= MB8795_NRXBUFS;
		map = xsc->sc_rx_dmamap[xsc->sc_rx_loaded_idx];

		DPRINTF(("%s: xe_dma_rx_continue() xsc->sc_rx_loaded_idx "
			"= %d\n", device_xname(sc->sc_dev),
			xsc->sc_rx_loaded_idx));
	}
#ifdef DIAGNOSTIC
	else
		panic("%s: Unexpected rx DMA continue while if not running",
		      device_xname(sc->sc_dev));
#endif

	return map;
}
738 
/*
 * nextdma "continue" callback for transmit: hand the single tx map to
 * the engine exactly once per transmission.  sc_tx_loaded guards
 * against handing it out twice; returning NULL tells the engine there
 * is nothing further to transfer.
 */
bus_dmamap_t
xe_dma_tx_continue(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;

	DPRINTF(("%s: xe_dma_tx_continue()\n", device_xname(sc->sc_dev)));

	if (xsc->sc_tx_loaded)
		map = NULL;
	else {
		map = xsc->sc_tx_dmamap;
		xsc->sc_tx_loaded++;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 1)
		panic("%s: sc->sc_tx_loaded is %d", device_xname(sc->sc_dev),
				xsc->sc_tx_loaded);
#endif

	return map;
}
763