/*      $NetBSD: if_qe.c,v 1.58 2004/10/30 18:10:06 thorpej Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things that still need to be done:
 *	Handle ubaresets. Does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.58 2004/10/30 18:10:06 thorpej Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # of transmit descriptors */

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};
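
/*
 * The extra descriptor at the end of each ring is initialized in
 * qeattach() as a QE_CHAIN descriptor that points back to the start
 * of the ring, so the device sees each ring as an endless loop.
 */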

struct	qe_softc {
	struct device	sc_dev;		/* Configuration common part	*/
	struct evcnt	sc_intrcnt;	/* Interrupt counting		*/
	struct ethercom sc_ec;		/* Ethernet common part		*/
#define sc_if	sc_ec.ec_if		/* network-visible interface	*/
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata *sc_qedata;	/* Descriptor struct		*/
	struct qe_cdata *sc_pqedata;	/* Unibus address of above	*/
	struct mbuf*	sc_txmbuf[TXDESCS];
	struct mbuf*	sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	bus_dmamap_t	sc_nulldmamap;	/* ethernet padding buffer	*/
	struct ubinfo	sc_ui;
	int		sc_intvec;	/* Interrupt vector		*/
	int		sc_nexttx;
	int		sc_inq;
	int		sc_lastack;
	int		sc_nextrx;
	int		sc_setup;	/* Setup packet in queue	*/
};
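
/*
 * Transmit ring bookkeeping: sc_nexttx is the next free descriptor,
 * sc_lastack the oldest descriptor not yet acknowledged by the
 * device, and sc_inq the number of descriptors queued in between.
 */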

static	int	qematch(struct device *, struct cfdata *, void *);
static	void	qeattach(struct device *, struct device *, void *);
static	void	qeinit(struct qe_softc *);
static	void	qestart(struct ifnet *);
static	void	qeintr(void *);
static	int	qeioctl(struct ifnet *, u_long, caddr_t);
static	int	qe_add_rxbuf(struct qe_softc *, int);
static	void	qe_setup(struct qe_softc *);
static	void	qetimeout(struct ifnet *);

CFATTACH_DECL(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)
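
/*
 * Note: these CSR access macros expect a local variable named sc
 * to be in scope where they are used.
 */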

#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)
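
/*
 * Qbus DMA addresses are 22 bits wide: LOWORD holds the low 16 bits
 * and HIWORD the upper six, which share a descriptor word with flag
 * bits such as QE_VALID.  As a made-up example, a bus address of
 * 0x2ffffe would be stored as LOWORD 0xfffe and HIWORD 0x2f.
 */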

#define	ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
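
/*
 * Descriptor buffer lengths are given as negative word (16-bit)
 * counts; a 1500-byte buffer, for example, is described as
 * -(1500/2) (see qematch() below).
 */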

/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for the interrupt.
 */
int
qematch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct	qe_softc ssc;
	struct	qe_softc *sc = &ssc;
	struct	uba_attach_args *ua = aux;
	struct	uba_softc *ubasc = (struct uba_softc *)parent;
	struct ubinfo ui;

#define	PROBESIZE	4096
	struct qe_ring *ring;
	struct	qe_ring *rp;
	int error;

	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK);
	bzero(sc, sizeof(struct qe_softc));
	bzero(ring, PROBESIZE);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	ubasc->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, ubasc->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	ui.ui_size = PROBESIZE;
	ui.ui_vaddr = (caddr_t)&ring[0];
	if ((error = uballoc((void *)parent, &ui, UBA_CANTWAIT))) {
		free(ring, M_TEMP);
		return 0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
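	/*
	 * ring[0] below is handed to the device as the transmit list
	 * and ring[2] as the receive list; both point into the same
	 * scratch space at &rp[4].
	 */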
	rp = (void *)ui.ui_baddr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = -64;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = -(1500/2);

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	/*
	 * All done with the bus resources.
	 */
	ubfree((void *)parent, &ui);
	free(ring, M_TEMP);
	return 1;
}

/*
 * Interface exists: make it available by filling in the network
 * interface record.  The system will initialize the interface when
 * it is ready to accept packets.
 */
void
qeattach(struct device *parent, struct device *self, void *aux)
{
	struct	uba_attach_args *ua = aux;
	struct	uba_softc *ubasc = (struct uba_softc *)parent;
	struct	qe_softc *sc = (struct qe_softc *)self;
	struct	ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct	qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, error;
	char *nullbuf;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA-safe memory for descriptors and setup memory.
	 */

	sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
	if ((error = ubmemalloc((struct uba_softc *)parent, &sc->sc_ui, 0))) {
		printf(": unable to ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Zero the newly allocated memory.
	 */
	bzero(sc->sc_qedata, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
	nullbuf = ((char*)sc->sc_qedata) + sizeof(struct qe_cdata);
	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors as well,
	 * so that we do not have to do this each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d, "
			    "error = %d\n", i, error);
			goto fail_6;
		}
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		printf("%s: unable to create pad buffer DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_6;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load pad buffer DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_7;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */

	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = ubasc->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out the ethernet address and report which type of card
	 * this is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

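	/*
	 * Bit 0 of the vector register can be set and read back on a
	 * DELQA but reads back as zero on a DEQNA, which is how the
	 * two cards are told apart here.
	 */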
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	printf("\n%s: %s, hardware address %s\n", sc->sc_dev.dv_xname,
		QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa" : "deqna",
		ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
		sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
		sc->sc_dev.dv_xname, "intr");

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
}

/*
 * Initialization of interface.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and reinit transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
		qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);
}

/*
 * Start output on interface.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t	buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, buflen, error;
	short orword, csr;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			goto out;
		/*
		 * Count the number of mbufs in the chain.
		 * We always DMA directly from the mbufs, which is why
		 * the transmit ring is really big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (m->m_pkthdr.len < ETHER_PAD_LEN) {
			buflen = ETHER_PAD_LEN;
			i++;
		} else
			buflen = m->m_pkthdr.len;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * m now points to an mbuf chain that can be loaded.
		 * Loop over the chain and set up a descriptor for each
		 * segment; short packets are padded out of the
		 * preloaded null buffer.
		 */
		totlen = 0;
		for (m0 = m; ; m0 = m0->m_next) {
			if (m0) {
				if (m0->m_len == 0)
					continue;
				error = bus_dmamap_load(sc->sc_dmat,
				    sc->sc_xmtmap[idx], mtod(m0, void *),
				    m0->m_len, 0, 0);
				buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
				len = m0->m_len;
			} else if (totlen < ETHER_PAD_LEN) {
				buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
				len = ETHER_PAD_LEN - totlen;
			} else {
				break;
			}

			totlen += len;
			/*
			 * The device transfers 16-bit words; odd start
			 * and end bytes are flagged with QE_ODDBEGIN/
			 * QE_ODDEND and the word count is rounded up to
			 * cover them.
			 */
			orword = 0;
			if (totlen == buflen) {
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
			if (m0 == NULL)
				break;
		}
#ifdef DIAGNOSTIC
		if (totlen != buflen)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

			m = sc->sc_rxmbuf[sc->sc_nextrx];
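			/*
			 * The received byte count is split across the
			 * two status words; the RBL field evidently
			 * counts bytes beyond the 60-byte minimum
			 * frame, hence the + 60 below.
			 */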
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif
			if ((status1 & QE_ESETUP) == 0)
				(*ifp->if_input)(ifp, m);
			else
				m_freem(m);
		}

	if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			if (sc->sc_txmbuf[idx] == NULL ||
			    sc->sc_txmbuf[idx]->m_pkthdr.len < ETHER_PAD_LEN)
				bus_dmamap_unload(sc->sc_dmat,
				    sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = NULL;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Queue up more packets */
	}
	/*
	 * How can the receive list become invalid???
	 * Verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch(ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If the interface is marked down and it is
			 * running, stop it by disabling the receive
			 * mechanism.
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If the interface is marked up and it is
			 * stopped, then start it.
			 */
			qeinit(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Send a new setup packet to match any new
			 * changes (like IFF_PROMISC etc).
			 */
			qe_setup(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		error = (cmd == SIOCADDMULTI) ?
			ether_addmulti(ifr, &sc->sc_ec) :
			ether_delmulti(ifr, &sc->sc_ec);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    sc->sc_dev.dv_xname, i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
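	/*
	 * With the two-byte offset, the 14-byte ethernet header ends at
	 * offset 16 of the (page-aligned) cluster, so the IP header
	 * that follows is longword aligned.
	 */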
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}

/*
 * Create a setup packet and put it in the queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	int i, j, k, idx, s;

	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
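	/*
	 * The 128-byte setup buffer is two 64-byte halves, each holding
	 * seven addresses stored column-wise: byte i of the address in
	 * column j lives at offset i * 8 + j.  Column 1 of the first
	 * half carries our own address, the 0xff fill leaves the
	 * broadcast address in the spare columns, and the loop below
	 * fills columns 3-7 and then columns 1-7 of the second half
	 * with up to 12 multicast addresses.
	 */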
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA put into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when there are
	 * more than 12 ethernet addresses.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
	/*
	 * Reset the interface to get it going again.
	 * Would restarting just the transmit logic be enough?
	 */
	qeinit(sc);
}