/*      $NetBSD: if_qe.c,v 1.42 2000/06/05 00:09:18 matt Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things still left to do:
 *	Add a timeout check for hung transmit logic.
 *	Handle ubaresets; this does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # transmit descs */

/*
 * Structure containing the elements that must be in DMA-safe memory.
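 * The extra entry at the end of each descriptor array is set up in
 * qeattach() as a chain descriptor pointing back to the start of the
 * array, turning the descriptor list into a ring.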
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};

struct	qe_softc {
	struct device	sc_dev;		/* Configuration common part	*/
	struct evcnt	sc_intrcnt;	/* Interrupt counting		*/
	struct ethercom sc_ec;		/* Ethernet common part		*/
#define sc_if	sc_ec.ec_if		/* network-visible interface	*/
	bus_space_tag_t sc_iot;
	bus_addr_t	sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata *sc_qedata;	/* Descriptor struct		*/
	struct qe_cdata *sc_pqedata;	/* Unibus address of above	*/
	bus_dmamap_t	sc_cmap;	/* Map for control structures	*/
	struct mbuf*	sc_txmbuf[TXDESCS];
	struct mbuf*	sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	int		sc_intvec;	/* Interrupt vector		*/
	int		sc_nexttx;
	int		sc_inq;
	int		sc_lastack;
	int		sc_nextrx;
	int		sc_setup;	/* Setup packet in queue	*/
};

static	int	qematch __P((struct device *, struct cfdata *, void *));
static	void	qeattach __P((struct device *, struct device *, void *));
static	void	qeinit __P((struct qe_softc *));
static	void	qestart __P((struct ifnet *));
static	void	qeintr __P((void *));
static	int	qeioctl __P((struct ifnet *, u_long, caddr_t));
static	int	qe_add_rxbuf __P((struct qe_softc *, int));
static	void	qe_setup __P((struct qe_softc *));
static	void	qetimeout __P((struct ifnet *));

struct	cfattach qe_ca = {
	sizeof(struct qe_softc), qematch, qeattach
};

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

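/*
 * Qbus DMA addresses are 22 bits wide; a descriptor carries the low
 * 16 bits in one word and the high 6 bits in part of another, which
 * is what these two macros split out.
 */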
#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)

/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(parent, cf, aux)
	struct	device *parent;
	struct	cfdata *cf;
	void	*aux;
{
	bus_dmamap_t	cmap;
	struct	qe_softc ssc;
	struct	qe_softc *sc = &ssc;
	struct	uba_attach_args *ua = aux;
	struct	uba_softc *ubasc = (struct uba_softc *)parent;

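/*
 * Probe area: four ring descriptors (a one-entry transmit list and a
 * one-entry receive list, each terminated by an unused descriptor)
 * plus a 128-byte buffer starting at &rp[4].
 */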
#define	PROBESIZE	(sizeof(struct qe_ring) * 4 + 128)
	struct	qe_ring ring[15]; /* For diag purposes only */
	struct	qe_ring *rp;
	int error;

	bzero(sc, sizeof(struct qe_softc));
	bzero(ring, PROBESIZE);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	ubasc->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, ubasc->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, PROBESIZE, 1, PROBESIZE, 0,
	    BUS_DMA_NOWAIT, &cmap))) {
		printf("qematch: bus_dmamap_create failed = %d\n", error);
		return 0;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, cmap, ring, PROBESIZE, 0,
	    BUS_DMA_NOWAIT))) {
		printf("qematch: bus_dmamap_load failed = %d\n", error);
		bus_dmamap_destroy(sc->sc_dmat, cmap);
		return 0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
	rp = (void *)cmap->dm_segs[0].ds_addr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = 128;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = 128;
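
	/*
	 * ring[1] and ring[3] were left zeroed above; a descriptor that
	 * does not have QE_VALID set should stop the device's list
	 * processing, so each fake list is just one descriptor long.
	 */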

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	/*
	 * All done with the bus resources.
	 */
	bus_dmamap_unload(sc->sc_dmat, cmap);
	bus_dmamap_destroy(sc->sc_dmat, cmap);
	return 1;
}

/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
qeattach(parent, self, aux)
	struct	device *parent, *self;
	void	*aux;
{
	struct	uba_attach_args *ua = aux;
	struct	uba_softc *ubasc = (struct uba_softc *)parent;
	struct	qe_softc *sc = (struct qe_softc *)self;
	struct	ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct	qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	int i, rseg, error;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA-safe memory for descriptors and setup memory.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct qe_cdata), NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct qe_cdata), (caddr_t *)&sc->sc_qedata,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct qe_cdata), 1,
	    sizeof(struct qe_cdata), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cmap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap,
	    sc->sc_qedata, sizeof(struct qe_cdata), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Zero the newly allocated memory.
	 */
	bzero(sc->sc_qedata, sizeof(struct qe_cdata));
	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors also,
	 * so that we do not have to do this each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
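	 * A descriptor with QE_CHAIN set has its address words pointing
	 * at the next descriptor instead of at a buffer; writing such a
	 * descriptor after the last real one loops each list into a ring.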
	 */
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;

	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = ubasc->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out the ethernet address and report which type of card
	 * this is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	printf("\n%s: %s, hardware address %s\n", sc->sc_dev.dv_xname,
		QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa":"deqna",
		ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
		sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
		sc->sc_dev.dv_xname, "intr");

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header));
#endif
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_qedata,
	    sizeof(struct qe_cdata));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * Initialization of interface.
 */
void
qeinit(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);
}

/*
 * Start output on interface.
 */
void
qestart(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t	buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, error;
	short orword;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splimp();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IF_DEQUEUE(&sc->sc_if.if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count the number of mbufs in the chain. We always DMA
		 * directly from the mbufs, which is why the transmit ring
		 * is really big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			IF_PREPEND(&sc->sc_if.if_snd, m);
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * m now points to an mbuf chain that can be loaded.
		 * Loop over it and load each segment.
		 */
		totlen = 0;
		for (m0 = m; m0; m0 = m0->m_next) {
			error = bus_dmamap_load(sc->sc_dmat, sc->sc_xmtmap[idx],
			    mtod(m0, void *), m0->m_len, 0, 0);
			buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
			len = m0->m_len;
			if (len == 0)
				continue;

			totlen += len;
			/* Word alignment calc */
			orword = 0;
			if (totlen == m->m_pkthdr.len) {
				if (totlen < ETHER_MIN_LEN)
					len += (ETHER_MIN_LEN - totlen);
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
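			/*
			 * The DEQNA transfers words: an odd start or end
			 * address is flagged with QE_ODDBEGIN/QE_ODDEND
			 * (with the length padded out to an even word
			 * boundary), and the buffer length field of the
			 * descriptor holds the negated word count.
			 */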
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
		}
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(arg)
	void *arg;
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct ether_header *eh;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;
			m = sc->sc_rxmbuf[sc->sc_nextrx];
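			/*
			 * The status words hold the frame length less 60
			 * bytes, split between the two words, so add the
			 * 60 bytes back in here.
			 */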
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
			if (ifp->if_bpf) {
				bpf_mtap(ifp->if_bpf, m);
				if ((ifp->if_flags & IFF_PROMISC) != 0 &&
				    bcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
				    ETHER_ADDR_LEN) != 0 &&
				    ((eh->ether_dhost[0] & 1) == 0)) {
					m_freem(m);
					continue;
				}
			}
#endif
			/*
			 * ALLMULTI means PROMISC in this driver.
			 */
			if ((ifp->if_flags & IFF_ALLMULTI) &&
			    ((eh->ether_dhost[0] & 1) == 0) &&
			    bcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
			    ETHER_ADDR_LEN)) {
				m_freem(m);
				continue;
			}
			(*ifp->if_input)(ifp, m);
		}

	if (csr & QE_XMIT_INT) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = 0;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Queue up more packets */
	}
	/*
	 * How can the receive list become invalid???
	 * Verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch(ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If the interface is marked down and it is running,
			 * stop it by disabling the receive mechanism.
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If the interface is marked up and it is stopped,
			 * then start it.
			 */
			qeinit(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Send a new setup packet to match any new changes
			 * (like IFF_PROMISC etc).
			 */
			qe_setup(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		error = (cmd == SIOCADDMULTI) ?
			ether_addmulti(ifr, &sc->sc_ec):
			ether_delmulti(ifr, &sc->sc_ec);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
		break;
	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(sc, i)
	struct qe_softc *sc;
	int i;
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    sc->sc_dev.dv_xname, i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}

/*
 * Create a setup packet and put it in the queue for sending.
 */
void
qe_setup(sc)
	struct qe_softc *sc;
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	int i, j, k, idx, s;

	s = splimp();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */
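
	/*
	 * The setup frame consists of two 64-byte halves, each holding
	 * up to seven addresses stored column-wise: byte i of the
	 * address in column n is kept at offset i * 8 + n (plus 64 for
	 * the second half). Column 1 above carries our own address.
	 */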

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA put into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when there are
	 * more than 12 ethernet addresses.
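	 * The extra word in the setup-frame length below (-65 words
	 * instead of the normal -64, i.e. 128 bytes) is presumably what
	 * switches the device to promiscuous reception.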
	 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
	/*
	 * Reset the interface to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(sc);
}